// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Perf interface to expose Dispatch Trace Log counters.
 *
 * Copyright (C) 2024 Kajol Jain, IBM Corporation
 */

#ifdef CONFIG_PPC_SPLPAR
#define pr_fmt(fmt) "vpa_dtl: " fmt

#include <asm/dtl.h>
#include <linux/perf_event.h>
#include <asm/plpar_wrappers.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define EVENT(_name, _code)     enum { _name = _code }

/*
 * Based on the Power Architecture Platform Reference (PAPR) documentation,
 * Table 14.14 "Per Virtual Processor Area", the Dispatch Trace Log (DTL)
 * Enable Mask below is used to select which virtual processor dispatch
 * and preempt events are traced:
 *   DTL_CEDE(0x1): Trace voluntary (OS initiated) virtual
 *   processor waits
 *   DTL_PREEMPT(0x2): Trace time slice preempts
 *   DTL_FAULT(0x4): Trace virtual partition memory page faults
 *   DTL_ALL(0x7): Trace all (DTL_CEDE | DTL_PREEMPT | DTL_FAULT)
 *
 * Event codes are based on the Dispatch Trace Log Enable Mask.
 */
EVENT(DTL_CEDE,         0x1);
EVENT(DTL_PREEMPT,      0x2);
EVENT(DTL_FAULT,        0x4);
EVENT(DTL_ALL,          0x7);

GENERIC_EVENT_ATTR(dtl_cede, DTL_CEDE);
GENERIC_EVENT_ATTR(dtl_preempt, DTL_PREEMPT);
GENERIC_EVENT_ATTR(dtl_fault, DTL_FAULT);
GENERIC_EVENT_ATTR(dtl_all, DTL_ALL);

PMU_FORMAT_ATTR(event, "config:0-7");
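
/*
 * perf_pmu_register() below exposes these attributes under sysfs, e.g.
 *   /sys/bus/event_source/devices/vpa_dtl/events/dtl_cede
 *   /sys/bus/event_source/devices/vpa_dtl/format/event
 * which is what lets the perf tool translate an event string such as
 * "vpa_dtl/dtl_cede/" into event->attr.config = 0x1.
 */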

static struct attribute *events_attr[] = {
	GENERIC_EVENT_PTR(DTL_CEDE),
	GENERIC_EVENT_PTR(DTL_PREEMPT),
	GENERIC_EVENT_PTR(DTL_FAULT),
	GENERIC_EVENT_PTR(DTL_ALL),
	NULL
};

static struct attribute_group event_group = {
	.name = "events",
	.attrs = events_attr,
};

static struct attribute *format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};

static const struct attribute_group format_group = {
	.name = "format",
	.attrs = format_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&format_group,
	&event_group,
	NULL,
};

/*
 * Per-cpu DTL state: @buf is the buffer registered with the hypervisor
 * and @last_idx is the last lppaca dtl_idx that has been consumed.
 */
struct vpa_dtl {
	struct dtl_entry	*buf;
	u64			last_idx;
};

struct vpa_pmu_ctx {
	struct perf_output_handle handle;
};

/* Per-cpu view of the AUX area this PMU writes trace data into */
struct vpa_pmu_buf {
	int     nr_pages;
	bool    snapshot;
	u64     *base;
	u64     size;
	u64     head;
};

static DEFINE_PER_CPU(struct vpa_pmu_ctx, vpa_pmu_ctx);
static DEFINE_PER_CPU(struct vpa_dtl, vpa_dtl_cpu);

/* Reference count of the active dtl threads */
static int dtl_global_refc;
static spinlock_t dtl_global_lock = __SPIN_LOCK_UNLOCKED(dtl_global_lock);

/*
 * Dump the dispatch trace log buffer data to the perf data.
 */
static void vpa_dtl_dump_sample_data(struct perf_event *event)
{
}

/*
 * The VPA Dispatch Trace Log counters do not interrupt on overflow.
 * Therefore, the kernel needs to poll the counters with an hrtimer to
 * avoid missing an overflow. The timer interval is based on the
 * sample_period count provided by the user, with a minimum interval of
 * 1 millisecond.
 */
static enum hrtimer_restart vpa_dtl_hrtimer_handle(struct hrtimer *hrtimer)
{
	struct perf_event *event;
	u64 period;

	event = container_of(hrtimer, struct perf_event, hw.hrtimer);

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return HRTIMER_NORESTART;

	vpa_dtl_dump_sample_data(event);
	period = max_t(u64, NSEC_PER_MSEC, event->hw.sample_period);
	hrtimer_forward_now(hrtimer, ns_to_ktime(period));

	return HRTIMER_RESTART;
}

static void vpa_dtl_start_hrtimer(struct perf_event *event)
{
	u64 period;
	struct hw_perf_event *hwc = &event->hw;

	period = max_t(u64, NSEC_PER_MSEC, hwc->sample_period);
	hrtimer_start(&hwc->hrtimer, ns_to_ktime(period), HRTIMER_MODE_REL_PINNED);
}

static void vpa_dtl_stop_hrtimer(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	hrtimer_cancel(&hwc->hrtimer);
}

/* Drop our reference; release dtl_access_lock when the last event is destroyed */
static void vpa_dtl_reset_global_refc(struct perf_event *event)
{
	spin_lock(&dtl_global_lock);
	dtl_global_refc--;
	if (dtl_global_refc <= 0) {
		dtl_global_refc = 0;
		up_write(&dtl_access_lock);
	}
	spin_unlock(&dtl_global_lock);
}

static int vpa_dtl_mem_alloc(int cpu)
{
	struct vpa_dtl *dtl = &per_cpu(vpa_dtl_cpu, cpu);
	struct dtl_entry *buf = NULL;

	/* Check for dispatch trace log buffer cache */
	if (!dtl_cache)
		return -ENOMEM;

	/*
	 * Called with dtl_global_lock (a spinlock) held, so the
	 * allocation must not sleep.
	 */
	buf = kmem_cache_alloc_node(dtl_cache, GFP_ATOMIC, cpu_to_node(cpu));
	if (!buf) {
		pr_warn("buffer allocation failed for cpu %d\n", cpu);
		return -ENOMEM;
	}
	dtl->buf = buf;
	return 0;
}

static int vpa_dtl_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* test the event attr type for PMU enumeration */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (!perfmon_capable())
		return -EACCES;

	/* Return if this is a counting event */
	if (!is_sampling_event(event))
		return -EOPNOTSUPP;

	/* no branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	/* Reject invalid event codes */
	switch (event->attr.config) {
	case DTL_LOG_CEDE:
	case DTL_LOG_PREEMPT:
	case DTL_LOG_FAULT:
	case DTL_LOG_ALL:
		break;
	default:
		return -EINVAL;
	}

	spin_lock(&dtl_global_lock);

	/*
	 * To ensure there are no other conflicting dtl users
	 * (example: /proc/powerpc/vcpudispatch_stats or debugfs dtl),
	 * the code below tries to take the dtl_access_lock.
	 * dtl_access_lock is a rw_semaphore defined in dtl.h, used to
	 * ensure there are no conflicting dtl users.
	 * The write lock is taken only for the first event
	 * (dtl_global_refc == 0); afterwards dtl_global_refc records
	 * that the dtl_access_lock is held by the vpa_dtl pmu interface.
	 */
	if (dtl_global_refc == 0 && !down_write_trylock(&dtl_access_lock)) {
		spin_unlock(&dtl_global_lock);
		return -EBUSY;
	}

	/* Allocate dtl buffer memory */
	if (vpa_dtl_mem_alloc(event->cpu)) {
		/* Release the lock taken above if no other event holds a reference */
		if (dtl_global_refc == 0)
			up_write(&dtl_access_lock);
		spin_unlock(&dtl_global_lock);
		return -ENOMEM;
	}

	/*
	 * Increment the number of active vpa_dtl pmu threads. The
	 * dtl_global_refc is used to keep count of cpu threads that
	 * are currently capturing dtl data using the vpa_dtl pmu
	 * interface.
	 */
	dtl_global_refc++;

	spin_unlock(&dtl_global_lock);

	hrtimer_setup(&hwc->hrtimer, vpa_dtl_hrtimer_handle, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

	/*
	 * Since hrtimers have a fixed rate, we can do a static freq->period
	 * mapping and avoid the whole period adjustment feedback loop.
	 */
	if (event->attr.freq) {
		long freq = event->attr.sample_freq;

		event->attr.sample_period = NSEC_PER_SEC / freq;
		hwc->sample_period = event->attr.sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
		hwc->last_period = hwc->sample_period;
		event->attr.freq = 0;
	}

	event->destroy = vpa_dtl_reset_global_refc;
	return 0;
}
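
/*
 * Worked example of the static freq->period mapping above: with
 * "perf record -F 1000" (sample_freq = 1000) the derived period is
 * NSEC_PER_SEC / 1000 = 1000000 ns, i.e. the hrtimer fires every 1 ms.
 * A higher frequency such as -F 4000 yields 250000 ns, which the
 * max_t(u64, NSEC_PER_MSEC, ...) clamp in the hrtimer helpers raises
 * back to 1 ms.
 */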

static int vpa_dtl_event_add(struct perf_event *event, int flags)
{
	int ret, hwcpu;
	unsigned long addr;
	struct vpa_dtl *dtl = &per_cpu(vpa_dtl_cpu, event->cpu);

	/*
	 * Register our dtl buffer with the hypervisor. The
	 * HV expects the buffer size to be passed in the second
	 * word of the buffer. Refer to section '14.11.3.2. H_REGISTER_VPA'
	 * of PAPR for more information.
	 */
	((u32 *)dtl->buf)[1] = cpu_to_be32(DISPATCH_LOG_BYTES);
	dtl->last_idx = 0;

	hwcpu = get_hard_smp_processor_id(event->cpu);
	addr = __pa(dtl->buf);

	ret = register_dtl(hwcpu, addr);
	if (ret) {
		pr_warn("DTL registration for cpu %d (hw %d) failed with %d\n",
			event->cpu, hwcpu, ret);
		return ret;
	}

	/* set our initial buffer indices */
	lppaca_of(event->cpu).dtl_idx = 0;

	/*
	 * Ensure that our updates to the lppaca fields have
	 * occurred before we actually enable the logging
	 */
	smp_wmb();

	/* enable event logging */
	lppaca_of(event->cpu).dtl_enable_mask = event->attr.config;

	vpa_dtl_start_hrtimer(event);

	return 0;
}
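
/*
 * Note on the log itself: once the buffer is registered and
 * dtl_enable_mask is set, the hypervisor appends one struct dtl_entry
 * per traced event and advances the monotonically increasing lppaca
 * dtl_idx. The buffer holds DISPATCH_LOG_BYTES / sizeof(struct dtl_entry)
 * entries and wraps, so a consumer compares its saved dtl->last_idx
 * against dtl_idx to find how many new entries are pending.
 */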

static void vpa_dtl_event_del(struct perf_event *event, int flags)
{
	int hwcpu = get_hard_smp_processor_id(event->cpu);
	struct vpa_dtl *dtl = &per_cpu(vpa_dtl_cpu, event->cpu);

	vpa_dtl_stop_hrtimer(event);
	unregister_dtl(hwcpu);
	kmem_cache_free(dtl_cache, dtl->buf);
	dtl->buf = NULL;
	lppaca_of(event->cpu).dtl_enable_mask = 0x0;
}

/*
 * This function is intentionally empty: vpa_dtl_dump_sample_data() is
 * what parses the dispatch trace log data and dumps it to perf.
 */
static void vpa_dtl_event_read(struct perf_event *event)
{
}

/*
 * Set up pmu-private data structures for an AUX area.
 * **pages contains the aux buffer allocated for this event on the
 * corresponding cpu. rb_alloc_aux() allocates the pages with
 * alloc_pages_node() and hands back the address of each page. Map
 * these pages into contiguous space using vmap() and use that as the
 * base address.
 *
 * The aux private data structure, i.e. "struct vpa_pmu_buf", mainly
 * saves:
 * - buf->base: aux buffer base address
 * - buf->head: offset from the base address where data will be written
 * - buf->size: size of the allocated memory
 */
static void *vpa_dtl_setup_aux(struct perf_event *event, void **pages,
		int nr_pages, bool snapshot)
{
	int i, cpu = event->cpu;
	struct vpa_pmu_buf *buf __free(kfree) = NULL;
	struct page **pglist __free(kfree) = NULL;

	/* We need at least one page for this to work. */
	if (!nr_pages)
		return NULL;

	if (cpu == -1)
		cpu = raw_smp_processor_id();

	buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, cpu_to_node(cpu));
	if (!buf)
		return NULL;

	pglist = kcalloc(nr_pages, sizeof(*pglist), GFP_KERNEL);
	if (!pglist)
		return NULL;

	for (i = 0; i < nr_pages; ++i)
		pglist[i] = virt_to_page(pages[i]);

	buf->base = vmap(pglist, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!buf->base)
		return NULL;

	buf->nr_pages = nr_pages;
	/* AUX snapshot mode is not supported */
	buf->snapshot = false;

	buf->size = nr_pages << PAGE_SHIFT;
	buf->head = 0;
	return no_free_ptr(buf);
}
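
/*
 * Design note: vmap() above provides one virtually contiguous view of
 * the AUX pages, so trace data can simply be written at
 * buf->base + buf->head even when the underlying pages are physically
 * discontiguous.
 */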

/*
 * Free pmu-private AUX data structures
 */
static void vpa_dtl_free_aux(void *aux)
{
	struct vpa_pmu_buf *buf = aux;

	vunmap(buf->base);
	kfree(buf);
}

static struct pmu vpa_dtl_pmu = {
	.task_ctx_nr = perf_invalid_context,

	.name = "vpa_dtl",
	.attr_groups = attr_groups,
	.event_init  = vpa_dtl_event_init,
	.add         = vpa_dtl_event_add,
	.del         = vpa_dtl_event_del,
	.read        = vpa_dtl_event_read,
	.setup_aux   = vpa_dtl_setup_aux,
	.free_aux    = vpa_dtl_free_aux,
	.capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_EXCLUSIVE,
};
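
/*
 * Expected usage from userspace, as a sketch (the exact command depends
 * on the perf tool's support for this PMU): events run in cpu context
 * (task_ctx_nr = perf_invalid_context), so a target CPU is specified,
 * e.g.:
 *
 *   perf record -e vpa_dtl/dtl_all/ -C 0 -- sleep 5
 *
 * where "dtl_all" resolves through the events/format attributes to
 * config = 0x7 (DTL_CEDE | DTL_PREEMPT | DTL_FAULT).
 */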

static int vpa_dtl_init(void)
{
	int r;

	if (!firmware_has_feature(FW_FEATURE_SPLPAR)) {
		pr_debug("not a shared virtualized system, not enabling\n");
		return -ENODEV;
	}

	/* This driver is intended only for the L1 host. */
	if (is_kvm_guest()) {
		pr_debug("Only supported for L1 host system\n");
		return -ENODEV;
	}

	r = perf_pmu_register(&vpa_dtl_pmu, vpa_dtl_pmu.name, -1);
	if (r)
		return r;

	return 0;
}

device_initcall(vpa_dtl_init);
#endif /* CONFIG_PPC_SPLPAR */