// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support Intel IOMMU PerfMon
 * Copyright(c) 2023 Intel Corporation.
 */
#define pr_fmt(fmt)	"DMAR: " fmt
#define dev_fmt(fmt)	pr_fmt(fmt)

#include <linux/dmar.h>
#include "iommu.h"
#include "perfmon.h"

PMU_FORMAT_ATTR(event,		"config:0-27");		/* ES: Events Select */
PMU_FORMAT_ATTR(event_group,	"config:28-31");	/* EGI: Event Group Index */

static struct attribute *iommu_pmu_format_attrs[] = {
	&format_attr_event_group.attr,
	&format_attr_event.attr,
	NULL
};

static struct attribute_group iommu_pmu_format_attr_group = {
	.name = "format",
	.attrs = iommu_pmu_format_attrs,
};
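
/*
 * The format fields above describe how the perf tool encodes an event
 * into perf_event_attr::config: the event select in bits 0-27 and the
 * event group index in bits 28-31. For instance (illustrative, the
 * dmar0 instance name depends on the platform), a raw encoding such as
 * dmar0/event_group=0x0,event=0x1/ selects the same event as the named
 * iommu_clocks attribute added via attr_update below.
 */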

/* The available events are added in attr_update later */
static struct attribute *attrs_empty[] = {
	NULL
};

static struct attribute_group iommu_pmu_events_attr_group = {
	.name = "events",
	.attrs = attrs_empty,
};

static const struct attribute_group *iommu_pmu_attr_groups[] = {
	&iommu_pmu_format_attr_group,
	&iommu_pmu_events_attr_group,
	NULL
};

static inline struct iommu_pmu *dev_to_iommu_pmu(struct device *dev)
{
	/*
	 * The perf core creates its own dev for each PMU.
	 * See pmu_dev_alloc().
	 */
	return container_of(dev_get_drvdata(dev), struct iommu_pmu, pmu);
}

#define IOMMU_PMU_ATTR(_name, _format, _filter)				\
	PMU_FORMAT_ATTR(_name, _format);				\
									\
static struct attribute *_name##_attr[] = {				\
	&format_attr_##_name.attr,					\
	NULL								\
};									\
									\
static umode_t								\
_name##_is_visible(struct kobject *kobj, struct attribute *attr, int i)	\
{									\
	struct device *dev = kobj_to_dev(kobj);				\
	struct iommu_pmu *iommu_pmu = dev_to_iommu_pmu(dev);		\
									\
	if (!iommu_pmu)							\
		return 0;						\
	return (iommu_pmu->filter & _filter) ? attr->mode : 0;		\
}									\
									\
static struct attribute_group _name = {					\
	.name		= "format",					\
	.attrs		= _name##_attr,					\
	.is_visible	= _name##_is_visible,				\
};

IOMMU_PMU_ATTR(filter_requester_id_en,	"config1:0",		IOMMU_PMU_FILTER_REQUESTER_ID);
IOMMU_PMU_ATTR(filter_domain_en,	"config1:1",		IOMMU_PMU_FILTER_DOMAIN);
IOMMU_PMU_ATTR(filter_pasid_en,		"config1:2",		IOMMU_PMU_FILTER_PASID);
IOMMU_PMU_ATTR(filter_ats_en,		"config1:3",		IOMMU_PMU_FILTER_ATS);
IOMMU_PMU_ATTR(filter_page_table_en,	"config1:4",		IOMMU_PMU_FILTER_PAGE_TABLE);
IOMMU_PMU_ATTR(filter_requester_id,	"config1:16-31",	IOMMU_PMU_FILTER_REQUESTER_ID);
IOMMU_PMU_ATTR(filter_domain,		"config1:32-47",	IOMMU_PMU_FILTER_DOMAIN);
IOMMU_PMU_ATTR(filter_pasid,		"config2:0-21",		IOMMU_PMU_FILTER_PASID);
IOMMU_PMU_ATTR(filter_ats,		"config2:24-28",	IOMMU_PMU_FILTER_ATS);
IOMMU_PMU_ATTR(filter_page_table,	"config2:32-36",	IOMMU_PMU_FILTER_PAGE_TABLE);
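
/*
 * Illustrative usage (the dmar0 instance name and the supported filters
 * depend on the platform): restrict a count to requests tagged with
 * PASID 2 by enabling the PASID filter and programming its value:
 *
 *   perf stat -e dmar0/iommu_requests,filter_pasid_en=0x1,filter_pasid=0x2/
 */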

#define iommu_pmu_en_requester_id(e)		((e) & 0x1)
#define iommu_pmu_en_domain(e)			(((e) >> 1) & 0x1)
#define iommu_pmu_en_pasid(e)			(((e) >> 2) & 0x1)
#define iommu_pmu_en_ats(e)			(((e) >> 3) & 0x1)
#define iommu_pmu_en_page_table(e)		(((e) >> 4) & 0x1)
#define iommu_pmu_get_requester_id(filter)	(((filter) >> 16) & 0xffff)
#define iommu_pmu_get_domain(filter)		(((filter) >> 32) & 0xffff)
#define iommu_pmu_get_pasid(filter)		((filter) & 0x3fffff)
#define iommu_pmu_get_ats(filter)		(((filter) >> 24) & 0x1f)
#define iommu_pmu_get_page_table(filter)	(((filter) >> 32) & 0x1f)

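/*
 * The macros below program and clear the per-counter filter registers.
 * Each counter owns a configuration space of IOMMU_PMU_CFG_OFFSET bytes
 * starting at cfg_reg; the filter registers follow the first
 * IOMMU_PMU_CFG_SIZE bytes of that space, spaced IOMMU_PMU_CFG_FILTERS_OFFSET
 * apart and indexed by the filter's bit position.
 */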
#define iommu_pmu_set_filter(_name, _config, _filter, _idx, _econfig)		\
{										\
	if ((iommu_pmu->filter & _filter) && iommu_pmu_en_##_name(_econfig)) {	\
		dmar_writel(iommu_pmu->cfg_reg + _idx * IOMMU_PMU_CFG_OFFSET +	\
			    IOMMU_PMU_CFG_SIZE +				\
			    (ffs(_filter) - 1) * IOMMU_PMU_CFG_FILTERS_OFFSET,	\
			    iommu_pmu_get_##_name(_config) | IOMMU_PMU_FILTER_EN);\
	}									\
}

#define iommu_pmu_clear_filter(_filter, _idx)					\
{										\
	if (iommu_pmu->filter & _filter) {					\
		dmar_writel(iommu_pmu->cfg_reg + _idx * IOMMU_PMU_CFG_OFFSET +	\
			    IOMMU_PMU_CFG_SIZE +				\
			    (ffs(_filter) - 1) * IOMMU_PMU_CFG_FILTERS_OFFSET,	\
			    0);							\
	}									\
}

/*
 * Define the event attr related functions
 * Input: _name: event attr name
 *        _string: string of the event in sysfs
 *        _g_idx: event group encoding
 *        _event: event encoding
 */
#define IOMMU_PMU_EVENT_ATTR(_name, _string, _g_idx, _event)			\
	PMU_EVENT_ATTR_STRING(_name, event_attr_##_name, _string)		\
										\
static struct attribute *_name##_attr[] = {					\
	&event_attr_##_name.attr.attr,						\
	NULL									\
};										\
										\
static umode_t									\
_name##_is_visible(struct kobject *kobj, struct attribute *attr, int i)		\
{										\
	struct device *dev = kobj_to_dev(kobj);					\
	struct iommu_pmu *iommu_pmu = dev_to_iommu_pmu(dev);			\
										\
	if (!iommu_pmu)								\
		return 0;							\
	return (iommu_pmu->evcap[_g_idx] & _event) ? attr->mode : 0;		\
}										\
										\
static struct attribute_group _name = {						\
	.name		= "events",						\
	.attrs		= _name##_attr,						\
	.is_visible	= _name##_is_visible,					\
};

IOMMU_PMU_EVENT_ATTR(iommu_clocks,		"event_group=0x0,event=0x001", 0x0, 0x001)
IOMMU_PMU_EVENT_ATTR(iommu_requests,		"event_group=0x0,event=0x002", 0x0, 0x002)
IOMMU_PMU_EVENT_ATTR(pw_occupancy,		"event_group=0x0,event=0x004", 0x0, 0x004)
IOMMU_PMU_EVENT_ATTR(ats_blocked,		"event_group=0x0,event=0x008", 0x0, 0x008)
IOMMU_PMU_EVENT_ATTR(iommu_mrds,		"event_group=0x1,event=0x001", 0x1, 0x001)
IOMMU_PMU_EVENT_ATTR(iommu_mem_blocked,		"event_group=0x1,event=0x020", 0x1, 0x020)
IOMMU_PMU_EVENT_ATTR(pg_req_posted,		"event_group=0x1,event=0x040", 0x1, 0x040)
IOMMU_PMU_EVENT_ATTR(ctxt_cache_lookup,		"event_group=0x2,event=0x001", 0x2, 0x001)
IOMMU_PMU_EVENT_ATTR(ctxt_cache_hit,		"event_group=0x2,event=0x002", 0x2, 0x002)
IOMMU_PMU_EVENT_ATTR(pasid_cache_lookup,	"event_group=0x2,event=0x004", 0x2, 0x004)
IOMMU_PMU_EVENT_ATTR(pasid_cache_hit,		"event_group=0x2,event=0x008", 0x2, 0x008)
IOMMU_PMU_EVENT_ATTR(ss_nonleaf_lookup,		"event_group=0x2,event=0x010", 0x2, 0x010)
IOMMU_PMU_EVENT_ATTR(ss_nonleaf_hit,		"event_group=0x2,event=0x020", 0x2, 0x020)
IOMMU_PMU_EVENT_ATTR(fs_nonleaf_lookup,		"event_group=0x2,event=0x040", 0x2, 0x040)
IOMMU_PMU_EVENT_ATTR(fs_nonleaf_hit,		"event_group=0x2,event=0x080", 0x2, 0x080)
IOMMU_PMU_EVENT_ATTR(hpt_nonleaf_lookup,	"event_group=0x2,event=0x100", 0x2, 0x100)
IOMMU_PMU_EVENT_ATTR(hpt_nonleaf_hit,		"event_group=0x2,event=0x200", 0x2, 0x200)
IOMMU_PMU_EVENT_ATTR(iotlb_lookup,		"event_group=0x3,event=0x001", 0x3, 0x001)
IOMMU_PMU_EVENT_ATTR(iotlb_hit,			"event_group=0x3,event=0x002", 0x3, 0x002)
IOMMU_PMU_EVENT_ATTR(hpt_leaf_lookup,		"event_group=0x3,event=0x004", 0x3, 0x004)
IOMMU_PMU_EVENT_ATTR(hpt_leaf_hit,		"event_group=0x3,event=0x008", 0x3, 0x008)
IOMMU_PMU_EVENT_ATTR(int_cache_lookup,		"event_group=0x4,event=0x001", 0x4, 0x001)
IOMMU_PMU_EVENT_ATTR(int_cache_hit_nonposted,	"event_group=0x4,event=0x002", 0x4, 0x002)
IOMMU_PMU_EVENT_ATTR(int_cache_hit_posted,	"event_group=0x4,event=0x004", 0x4, 0x004)
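
/*
 * The named events above are exposed under the PMU's "events" sysfs
 * directory only when the enumerated event capabilities include them,
 * and can then be used directly, e.g. (illustrative, the instance name
 * depends on the platform):
 *
 *   perf stat -e dmar0/iommu_clocks/ -a sleep 1
 */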

static const struct attribute_group *iommu_pmu_attr_update[] = {
	&filter_requester_id_en,
	&filter_domain_en,
	&filter_pasid_en,
	&filter_ats_en,
	&filter_page_table_en,
	&filter_requester_id,
	&filter_domain,
	&filter_pasid,
	&filter_ats,
	&filter_page_table,
	&iommu_clocks,
	&iommu_requests,
	&pw_occupancy,
	&ats_blocked,
	&iommu_mrds,
	&iommu_mem_blocked,
	&pg_req_posted,
	&ctxt_cache_lookup,
	&ctxt_cache_hit,
	&pasid_cache_lookup,
	&pasid_cache_hit,
	&ss_nonleaf_lookup,
	&ss_nonleaf_hit,
	&fs_nonleaf_lookup,
	&fs_nonleaf_hit,
	&hpt_nonleaf_lookup,
	&hpt_nonleaf_hit,
	&iotlb_lookup,
	&iotlb_hit,
	&hpt_leaf_lookup,
	&hpt_leaf_hit,
	&int_cache_lookup,
	&int_cache_hit_nonposted,
	&int_cache_hit_posted,
	NULL
};

static inline void __iomem *
iommu_event_base(struct iommu_pmu *iommu_pmu, int idx)
{
	return iommu_pmu->cntr_reg + idx * iommu_pmu->cntr_stride;
}

static inline void __iomem *
iommu_config_base(struct iommu_pmu *iommu_pmu, int idx)
{
	return iommu_pmu->cfg_reg + idx * IOMMU_PMU_CFG_OFFSET;
}

static inline struct iommu_pmu *iommu_event_to_pmu(struct perf_event *event)
{
	return container_of(event->pmu, struct iommu_pmu, pmu);
}

static inline u64 iommu_event_config(struct perf_event *event)
{
	u64 config = event->attr.config;

	return (iommu_event_select(config) << IOMMU_EVENT_CFG_ES_SHIFT) |
	       (iommu_event_group(config) << IOMMU_EVENT_CFG_EGI_SHIFT) |
	       IOMMU_EVENT_CFG_INT;
}

static inline bool is_iommu_pmu_event(struct iommu_pmu *iommu_pmu,
				      struct perf_event *event)
{
	return event->pmu == &iommu_pmu->pmu;
}

static int iommu_pmu_validate_event(struct perf_event *event)
{
	struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event);
	u32 event_group = iommu_event_group(event->attr.config);

	if (event_group >= iommu_pmu->num_eg)
		return -EINVAL;

	return 0;
}

static int iommu_pmu_validate_group(struct perf_event *event)
{
	struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event);
	struct perf_event *sibling;
	int nr = 0;

	/*
	 * All events in a group must be scheduled simultaneously.
	 * Check whether there are enough counters for all the events.
	 */
	for_each_sibling_event(sibling, event->group_leader) {
		if (!is_iommu_pmu_event(iommu_pmu, sibling) ||
		    sibling->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (++nr > iommu_pmu->num_cntr)
			return -EINVAL;
	}

	return 0;
}

static int iommu_pmu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* sampling not supported */
	if (event->attr.sample_period)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	if (iommu_pmu_validate_event(event))
		return -EINVAL;

	hwc->config = iommu_event_config(event);

	return iommu_pmu_validate_group(event);
}

static void iommu_pmu_event_update(struct perf_event *event)
{
	struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event);
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_count, new_count, delta;
	int shift = 64 - iommu_pmu->cntr_width;

again:
	prev_count = local64_read(&hwc->prev_count);
	new_count = dmar_readq(iommu_event_base(iommu_pmu, hwc->idx));
	if (local64_xchg(&hwc->prev_count, new_count) != prev_count)
		goto again;

	/*
	 * The counter width is enumerated and may be narrower than 64 bits.
	 * Always shift the counter value before using it.
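	 *
	 * For example, with a 48-bit counter the shift is 16: shifting both
	 * values up by 16 before the subtraction makes a counter wrap-around
	 * still produce the correct positive delta after the final right
	 * shift.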
	 */
	delta = (new_count << shift) - (prev_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
}

static void iommu_pmu_start(struct perf_event *event, int flags)
{
	struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event);
	struct intel_iommu *iommu = iommu_pmu->iommu;
	struct hw_perf_event *hwc = &event->hw;
	u64 count;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	if (WARN_ON_ONCE(hwc->idx < 0 || hwc->idx >= IOMMU_PMU_IDX_MAX))
		return;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

	hwc->state = 0;

	/* Always reprogram the period */
	count = dmar_readq(iommu_event_base(iommu_pmu, hwc->idx));
	local64_set((&hwc->prev_count), count);

	/*
	 * Any error from the ecmd is ignored here:
	 * - The existing perf_event subsystem doesn't handle the error.
	 *   Only the IOMMU PMU returns a runtime HW error. We don't want
	 *   to change the existing generic interfaces for this specific
	 *   case.
	 * - It's a corner case caused by HW, which is very unlikely to
	 *   happen. There is nothing SW can do about it.
	 * - The worst case is that the user gets <not counted> from the
	 *   perf command, which at least gives a hint.
	 */
	ecmd_submit_sync(iommu, DMA_ECMD_ENABLE, hwc->idx, 0);

	perf_event_update_userpage(event);
}

static void iommu_pmu_stop(struct perf_event *event, int flags)
{
	struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event);
	struct intel_iommu *iommu = iommu_pmu->iommu;
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		ecmd_submit_sync(iommu, DMA_ECMD_DISABLE, hwc->idx, 0);

		iommu_pmu_event_update(event);

		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static inline int
iommu_pmu_validate_per_cntr_event(struct iommu_pmu *iommu_pmu,
				  int idx, struct perf_event *event)
{
	u32 event_group = iommu_event_group(event->attr.config);
	u32 select = iommu_event_select(event->attr.config);

	if (!(iommu_pmu->cntr_evcap[idx][event_group] & select))
		return -EINVAL;

	return 0;
}

static int iommu_pmu_assign_event(struct iommu_pmu *iommu_pmu,
				  struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	/*
	 * The counters that support only a limited set of events are
	 * usually at the end. Schedule them first to accommodate more
	 * events.
	 */
	for (idx = iommu_pmu->num_cntr - 1; idx >= 0; idx--) {
		if (test_and_set_bit(idx, iommu_pmu->used_mask))
			continue;
		/* Check per-counter event capabilities */
		if (!iommu_pmu_validate_per_cntr_event(iommu_pmu, idx, event))
			break;
		clear_bit(idx, iommu_pmu->used_mask);
	}
	if (idx < 0)
		return -EINVAL;

	iommu_pmu->event_list[idx] = event;
	hwc->idx = idx;

	/* config events */
	dmar_writeq(iommu_config_base(iommu_pmu, idx), hwc->config);

	iommu_pmu_set_filter(requester_id, event->attr.config1,
			     IOMMU_PMU_FILTER_REQUESTER_ID, idx,
			     event->attr.config1);
	iommu_pmu_set_filter(domain, event->attr.config1,
			     IOMMU_PMU_FILTER_DOMAIN, idx,
			     event->attr.config1);
	iommu_pmu_set_filter(pasid, event->attr.config2,
			     IOMMU_PMU_FILTER_PASID, idx,
			     event->attr.config1);
	iommu_pmu_set_filter(ats, event->attr.config2,
			     IOMMU_PMU_FILTER_ATS, idx,
			     event->attr.config1);
	iommu_pmu_set_filter(page_table, event->attr.config2,
			     IOMMU_PMU_FILTER_PAGE_TABLE, idx,
			     event->attr.config1);

	return 0;
}

static int iommu_pmu_add(struct perf_event *event, int flags)
{
	struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event);
	struct hw_perf_event *hwc = &event->hw;
	int ret;

	ret = iommu_pmu_assign_event(iommu_pmu, event);
	if (ret < 0)
		return ret;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		iommu_pmu_start(event, 0);

	return 0;
}

static void iommu_pmu_del(struct perf_event *event, int flags)
{
	struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event);
	int idx = event->hw.idx;

	iommu_pmu_stop(event, PERF_EF_UPDATE);

	iommu_pmu_clear_filter(IOMMU_PMU_FILTER_REQUESTER_ID, idx);
	iommu_pmu_clear_filter(IOMMU_PMU_FILTER_DOMAIN, idx);
	iommu_pmu_clear_filter(IOMMU_PMU_FILTER_PASID, idx);
	iommu_pmu_clear_filter(IOMMU_PMU_FILTER_ATS, idx);
	iommu_pmu_clear_filter(IOMMU_PMU_FILTER_PAGE_TABLE, idx);

	iommu_pmu->event_list[idx] = NULL;
	event->hw.idx = -1;
	clear_bit(idx, iommu_pmu->used_mask);

	perf_event_update_userpage(event);
}

static void iommu_pmu_enable(struct pmu *pmu)
{
	struct iommu_pmu *iommu_pmu = container_of(pmu, struct iommu_pmu, pmu);
	struct intel_iommu *iommu = iommu_pmu->iommu;

	ecmd_submit_sync(iommu, DMA_ECMD_UNFREEZE, 0, 0);
}

static void iommu_pmu_disable(struct pmu *pmu)
{
	struct iommu_pmu *iommu_pmu = container_of(pmu, struct iommu_pmu, pmu);
	struct intel_iommu *iommu = iommu_pmu->iommu;

	ecmd_submit_sync(iommu, DMA_ECMD_FREEZE, 0, 0);
}

static void iommu_pmu_counter_overflow(struct iommu_pmu *iommu_pmu)
{
	struct perf_event *event;
	u64 status;
	int i;

	/*
	 * Two counters may overflow very close together. Always check
	 * whether there are more overflows to handle.
	 */
	while ((status = dmar_readq(iommu_pmu->overflow))) {
		for_each_set_bit(i, (unsigned long *)&status, iommu_pmu->num_cntr) {
			/*
			 * Find the assigned event of the counter.
			 * Accumulate the value into the event->count.
			 */
			event = iommu_pmu->event_list[i];
			if (!event) {
				pr_warn_once("Cannot find the assigned event for counter %d\n", i);
				continue;
			}
			iommu_pmu_event_update(event);
		}

		dmar_writeq(iommu_pmu->overflow, status);
	}
}

static irqreturn_t iommu_pmu_irq_handler(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;

	if (!dmar_readl(iommu->reg + DMAR_PERFINTRSTS_REG))
		return IRQ_NONE;

	iommu_pmu_counter_overflow(iommu->pmu);

	/* Clear the status bit */
	dmar_writel(iommu->reg + DMAR_PERFINTRSTS_REG, DMA_PERFINTRSTS_PIS);

	return IRQ_HANDLED;
}

static int __iommu_pmu_register(struct intel_iommu *iommu)
{
	struct iommu_pmu *iommu_pmu = iommu->pmu;

	iommu_pmu->pmu.name		= iommu->name;
	iommu_pmu->pmu.task_ctx_nr	= perf_invalid_context;
	iommu_pmu->pmu.event_init	= iommu_pmu_event_init;
	iommu_pmu->pmu.pmu_enable	= iommu_pmu_enable;
	iommu_pmu->pmu.pmu_disable	= iommu_pmu_disable;
	iommu_pmu->pmu.add		= iommu_pmu_add;
	iommu_pmu->pmu.del		= iommu_pmu_del;
	iommu_pmu->pmu.start		= iommu_pmu_start;
	iommu_pmu->pmu.stop		= iommu_pmu_stop;
	iommu_pmu->pmu.read		= iommu_pmu_event_update;
	iommu_pmu->pmu.attr_groups	= iommu_pmu_attr_groups;
	iommu_pmu->pmu.attr_update	= iommu_pmu_attr_update;
	iommu_pmu->pmu.capabilities	= PERF_PMU_CAP_NO_EXCLUDE;
	iommu_pmu->pmu.scope		= PERF_PMU_SCOPE_SYS_WIDE;
	iommu_pmu->pmu.module		= THIS_MODULE;

	return perf_pmu_register(&iommu_pmu->pmu, iommu_pmu->pmu.name, -1);
}

static inline void __iomem *
get_perf_reg_address(struct intel_iommu *iommu, u32 offset)
{
	u32 off = dmar_readl(iommu->reg + offset);

	return iommu->reg + off;
}

int alloc_iommu_pmu(struct intel_iommu *iommu)
{
	struct iommu_pmu *iommu_pmu;
	int i, j, ret;
	u64 perfcap;
	u32 cap;

	if (!ecap_pms(iommu->ecap))
		return 0;

	/* The IOMMU PMU requires ECMD support as well */
	if (!cap_ecmds(iommu->cap))
		return -ENODEV;

	perfcap = dmar_readq(iommu->reg + DMAR_PERFCAP_REG);
	/* Performance monitoring is not supported. */
	if (!perfcap)
		return -ENODEV;

	/* Sanity check the number of counters and event groups */
	if (!pcap_num_cntr(perfcap) || !pcap_num_event_group(perfcap))
		return -ENODEV;

	/* The interrupt on overflow is required */
	if (!pcap_interrupt(perfcap))
		return -ENODEV;

	/* Check required Enhanced Command Capability */
	if (!ecmd_has_pmu_essential(iommu))
		return -ENODEV;

	iommu_pmu = kzalloc(sizeof(*iommu_pmu), GFP_KERNEL);
	if (!iommu_pmu)
		return -ENOMEM;

	iommu_pmu->num_cntr = pcap_num_cntr(perfcap);
	if (iommu_pmu->num_cntr > IOMMU_PMU_IDX_MAX) {
		pr_warn_once("The number of IOMMU counters %d > max(%d), clipping!",
			     iommu_pmu->num_cntr, IOMMU_PMU_IDX_MAX);
		iommu_pmu->num_cntr = IOMMU_PMU_IDX_MAX;
	}

	iommu_pmu->cntr_width = pcap_cntr_width(perfcap);
	iommu_pmu->filter = pcap_filters_mask(perfcap);
	iommu_pmu->cntr_stride = pcap_cntr_stride(perfcap);
	iommu_pmu->num_eg = pcap_num_event_group(perfcap);

	iommu_pmu->evcap = kcalloc(iommu_pmu->num_eg, sizeof(u64), GFP_KERNEL);
	if (!iommu_pmu->evcap) {
		ret = -ENOMEM;
		goto free_pmu;
	}

	/* Parse event group capabilities */
	for (i = 0; i < iommu_pmu->num_eg; i++) {
		u64 pcap;

		pcap = dmar_readq(iommu->reg + DMAR_PERFEVNTCAP_REG +
				  i * IOMMU_PMU_CAP_REGS_STEP);
		iommu_pmu->evcap[i] = pecap_es(pcap);
	}

	iommu_pmu->cntr_evcap = kcalloc(iommu_pmu->num_cntr, sizeof(u32 *), GFP_KERNEL);
	if (!iommu_pmu->cntr_evcap) {
		ret = -ENOMEM;
		goto free_pmu_evcap;
	}
	for (i = 0; i < iommu_pmu->num_cntr; i++) {
		iommu_pmu->cntr_evcap[i] = kcalloc(iommu_pmu->num_eg, sizeof(u32), GFP_KERNEL);
		if (!iommu_pmu->cntr_evcap[i]) {
			ret = -ENOMEM;
			goto free_pmu_cntr_evcap;
		}
		/*
		 * Default to the global capabilities; they will be adjusted
		 * according to the per-counter capabilities later.
		 */
		for (j = 0; j < iommu_pmu->num_eg; j++)
			iommu_pmu->cntr_evcap[i][j] = (u32)iommu_pmu->evcap[j];
	}

	iommu_pmu->cfg_reg = get_perf_reg_address(iommu, DMAR_PERFCFGOFF_REG);
	iommu_pmu->cntr_reg = get_perf_reg_address(iommu, DMAR_PERFCNTROFF_REG);
	iommu_pmu->overflow = get_perf_reg_address(iommu, DMAR_PERFOVFOFF_REG);

	/*
	 * Check per-counter capabilities. All counters should have the
	 * same capabilities on Interrupt on Overflow Support and Counter
	 * Width.
	 */
	for (i = 0; i < iommu_pmu->num_cntr; i++) {
		cap = dmar_readl(iommu_pmu->cfg_reg +
				 i * IOMMU_PMU_CFG_OFFSET +
				 IOMMU_PMU_CFG_CNTRCAP_OFFSET);
		if (!iommu_cntrcap_pcc(cap))
			continue;

		/*
		 * It's possible that some counters have different
		 * capabilities because of, e.g., a HW bug. Check that
		 * corner case here and simply drop those counters.
		 */
		if ((iommu_cntrcap_cw(cap) != iommu_pmu->cntr_width) ||
		    !iommu_cntrcap_ios(cap)) {
			iommu_pmu->num_cntr = i;
			pr_warn("PMU counter capability inconsistent, counter number reduced to %d\n",
				iommu_pmu->num_cntr);
		}

		/* Clear the pre-defined events group */
		for (j = 0; j < iommu_pmu->num_eg; j++)
			iommu_pmu->cntr_evcap[i][j] = 0;

		/* Override with per-counter event capabilities */
		for (j = 0; j < iommu_cntrcap_egcnt(cap); j++) {
			cap = dmar_readl(iommu_pmu->cfg_reg + i * IOMMU_PMU_CFG_OFFSET +
					 IOMMU_PMU_CFG_CNTREVCAP_OFFSET +
					 (j * IOMMU_PMU_OFF_REGS_STEP));
			iommu_pmu->cntr_evcap[i][iommu_event_group(cap)] = iommu_event_select(cap);
			/*
			 * Some events may only be supported by a specific counter.
			 * Track them in the evcap as well.
			 */
			iommu_pmu->evcap[iommu_event_group(cap)] |= iommu_event_select(cap);
		}
	}

	iommu_pmu->iommu = iommu;
	iommu->pmu = iommu_pmu;

	return 0;

free_pmu_cntr_evcap:
	for (i = 0; i < iommu_pmu->num_cntr; i++)
		kfree(iommu_pmu->cntr_evcap[i]);
	kfree(iommu_pmu->cntr_evcap);
free_pmu_evcap:
	kfree(iommu_pmu->evcap);
free_pmu:
	kfree(iommu_pmu);

	return ret;
}

void free_iommu_pmu(struct intel_iommu *iommu)
{
	struct iommu_pmu *iommu_pmu = iommu->pmu;

	if (!iommu_pmu)
		return;

	if (iommu_pmu->evcap) {
		int i;

		for (i = 0; i < iommu_pmu->num_cntr; i++)
			kfree(iommu_pmu->cntr_evcap[i]);
		kfree(iommu_pmu->cntr_evcap);
	}
	kfree(iommu_pmu->evcap);
	kfree(iommu_pmu);
	iommu->pmu = NULL;
}

static int iommu_pmu_set_interrupt(struct intel_iommu *iommu)
{
	struct iommu_pmu *iommu_pmu = iommu->pmu;
	int irq, ret;

	irq = dmar_alloc_hwirq(IOMMU_IRQ_ID_OFFSET_PERF + iommu->seq_id, iommu->node, iommu);
	if (irq <= 0)
		return -EINVAL;

	snprintf(iommu_pmu->irq_name, sizeof(iommu_pmu->irq_name), "dmar%d-perf", iommu->seq_id);

	iommu->perf_irq = irq;
	ret = request_threaded_irq(irq, NULL, iommu_pmu_irq_handler,
				   IRQF_ONESHOT, iommu_pmu->irq_name, iommu);
	if (ret) {
		dmar_free_hwirq(irq);
		iommu->perf_irq = 0;
		return ret;
	}
	return 0;
}

static void iommu_pmu_unset_interrupt(struct intel_iommu *iommu)
{
	if (!iommu->perf_irq)
		return;

	free_irq(iommu->perf_irq, iommu);
	dmar_free_hwirq(iommu->perf_irq);
	iommu->perf_irq = 0;
}

void iommu_pmu_register(struct intel_iommu *iommu)
{
	struct iommu_pmu *iommu_pmu = iommu->pmu;

	if (!iommu_pmu)
		return;

	if (__iommu_pmu_register(iommu))
		goto err;

	/* Set interrupt for overflow */
	if (iommu_pmu_set_interrupt(iommu))
		goto unregister;

	return;

unregister:
	perf_pmu_unregister(&iommu_pmu->pmu);
err:
	pr_err("Failed to register PMU for iommu (seq_id = %d)\n", iommu->seq_id);
	free_iommu_pmu(iommu);
}

void iommu_pmu_unregister(struct intel_iommu *iommu)
{
	struct iommu_pmu *iommu_pmu = iommu->pmu;

	if (!iommu_pmu)
		return;

	iommu_pmu_unset_interrupt(iommu);
	perf_pmu_unregister(&iommu_pmu->pmu);
}