xref: /linux/arch/powerpc/perf/hv-24x7.c (revision 98817a84ff1c755c347ac633ff017a623a631fad)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Hypervisor supplied "24x7" performance counter support
4  *
5  * Author: Cody P Schafer <cody@linux.vnet.ibm.com>
6  * Copyright 2014 IBM Corporation.
7  */
8 
9 #define pr_fmt(fmt) "hv-24x7: " fmt
10 
11 #include <linux/perf_event.h>
12 #include <linux/rbtree.h>
13 #include <linux/module.h>
14 #include <linux/slab.h>
15 #include <linux/vmalloc.h>
16 
17 #include <asm/cputhreads.h>
18 #include <asm/firmware.h>
19 #include <asm/hvcall.h>
20 #include <asm/io.h>
21 #include <linux/byteorder/generic.h>
22 
23 #include <asm/rtas.h>
24 #include "hv-24x7.h"
25 #include "hv-24x7-catalog.h"
26 #include "hv-common.h"
27 
28 /* Version of the 24x7 hypervisor API to use on this machine. */
29 static int interface_version;
30 
31 /* Whether we have to aggregate result data for some domains. */
32 static bool aggregate_result_elements;
33 
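/*
 * Each DOMAIN(n, v, x, c) entry in hv-24x7-domains.h is expanded below: 'n'
 * forms the HV_PERF_DOMAIN_##n case label and 'c' is the "is physical" flag
 * returned by is_physical_domain(). See hv-24x7-domains.h for the full list
 * and the meaning of the remaining parameters.
 */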
34 static bool domain_is_valid(unsigned domain)
35 {
36 	switch (domain) {
37 #define DOMAIN(n, v, x, c)		\
38 	case HV_PERF_DOMAIN_##n:	\
39 		/* fall through */
40 #include "hv-24x7-domains.h"
41 #undef DOMAIN
42 		return true;
43 	default:
44 		return false;
45 	}
46 }
47 
48 static bool is_physical_domain(unsigned domain)
49 {
50 	switch (domain) {
51 #define DOMAIN(n, v, x, c)		\
52 	case HV_PERF_DOMAIN_##n:	\
53 		return c;
54 #include "hv-24x7-domains.h"
55 #undef DOMAIN
56 	default:
57 		return false;
58 	}
59 }
60 
61 /*
62  * The Processor Module Information system parameter allows certain
63  * processor module information to be transferred from the platform to the OS.
64  * Per the PAPR+ document, the parameter token value is 43 (buffer layout below).
65  */
66 
67 #define PROCESSOR_MODULE_INFO   43
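/*
 * Layout of the rtas_data_buf contents consumed by read_24x7_sys_info()
 * below (16-bit big-endian fields):
 *   bytes 0-1: returned data length
 *   bytes 2-3: number of module types
 *   bytes 4-5: number of sockets
 *   bytes 6-7: chips per socket
 *   bytes 8-9: cores per chip
 */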
68 
69 static u32 phys_sockets;	/* Physical sockets */
70 static u32 phys_chipspersocket;	/* Physical chips per socket */
71 static u32 phys_coresperchip; /* Physical cores per chip */
72 
73 /*
74  * read_24x7_sys_info()
75  * Retrieve the number of sockets, chips per socket, and cores per chip
76  * through the ibm,get-system-parameter RTAS call.
77  */
78 void read_24x7_sys_info(void)
79 {
80 	int call_status, len, ntypes;
81 
82 	spin_lock(&rtas_data_buf_lock);
83 
84 	/*
85 	 * Default the system parameters (sockets, chips per socket and
86 	 * cores per chip) to 1.
87 	 */
88 	phys_sockets = 1;
89 	phys_chipspersocket = 1;
90 	phys_coresperchip = 1;
91 
92 	call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
93 				NULL,
94 				PROCESSOR_MODULE_INFO,
95 				__pa(rtas_data_buf),
96 				RTAS_DATA_BUF_SIZE);
97 
98 	if (call_status != 0) {
99 		pr_err("Error calling get-system-parameter %d\n",
100 		       call_status);
101 	} else {
102 		len = be16_to_cpup((__be16 *)&rtas_data_buf[0]);
103 		if (len < 8)
104 			goto out;
105 
106 		ntypes = be16_to_cpup((__be16 *)&rtas_data_buf[2]);
107 
108 		if (!ntypes)
109 			goto out;
110 
111 		phys_sockets = be16_to_cpup((__be16 *)&rtas_data_buf[4]);
112 		phys_chipspersocket = be16_to_cpup((__be16 *)&rtas_data_buf[6]);
113 		phys_coresperchip = be16_to_cpup((__be16 *)&rtas_data_buf[8]);
114 	}
115 
116 out:
117 	spin_unlock(&rtas_data_buf_lock);
118 }
119 
120 /* Domains for which more than one result element is returned for each event. */
121 static bool domain_needs_aggregation(unsigned int domain)
122 {
123 	return aggregate_result_elements &&
124 			(domain == HV_PERF_DOMAIN_PHYS_CORE ||
125 			 (domain >= HV_PERF_DOMAIN_VCPU_HOME_CORE &&
126 			  domain <= HV_PERF_DOMAIN_VCPU_REMOTE_NODE));
127 }
128 
129 static const char *domain_name(unsigned domain)
130 {
131 	if (!domain_is_valid(domain))
132 		return NULL;
133 
134 	switch (domain) {
135 	case HV_PERF_DOMAIN_PHYS_CHIP:		return "Physical Chip";
136 	case HV_PERF_DOMAIN_PHYS_CORE:		return "Physical Core";
137 	case HV_PERF_DOMAIN_VCPU_HOME_CORE:	return "VCPU Home Core";
138 	case HV_PERF_DOMAIN_VCPU_HOME_CHIP:	return "VCPU Home Chip";
139 	case HV_PERF_DOMAIN_VCPU_HOME_NODE:	return "VCPU Home Node";
140 	case HV_PERF_DOMAIN_VCPU_REMOTE_NODE:	return "VCPU Remote Node";
141 	}
142 
143 	WARN_ON_ONCE(domain);
144 	return NULL;
145 }
146 
147 static bool catalog_entry_domain_is_valid(unsigned domain)
148 {
149 	/* POWER8 doesn't support virtual domains. */
150 	if (interface_version == 1)
151 		return is_physical_domain(domain);
152 	else
153 		return domain_is_valid(domain);
154 }
155 
156 /*
157  * TODO: Merging events:
158  * - Think of the hcall as an interface to a 4d array of counters:
159  *   - x = domains
160  *   - y = indexes in the domain (core, chip, vcpu, node, etc)
161  *   - z = offset into the counter space
162  *   - w = lpars (guest vms, "logical partitions")
163  * - A single request is: x,y,y_last,z,z_last,w,w_last
164  *   - this means we can retrieve a rectangle of counters in y,z for a single x.
165  *
166  * - Things to consider (ignoring w):
167  *   - input  cost_per_request = 16
168  *   - output cost_per_result(ys,zs)  = 8 + 8 * ys + ys * zs
169  *   - limited number of requests per hcall (must fit into 4K bytes)
170  *     - 4k = 16 [buffer header] + 16 [request size] * request_count
171  *     - 255 requests per hcall (see the worked example below)
172  *   - sometimes it will be more efficient to read extra data and discard
173  */
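/*
 * Worked example for the numbers above: with the 16-byte buffer header and
 * 16-byte requests, 16 + 16 * 255 = 4096, hence the 255-requests-per-hcall
 * limit. Likewise, a single result covering ys=2 indexes and zs=4 counter
 * offsets costs 8 + 8 * 2 + 2 * 4 = 32 bytes of output.
 */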
174 
175 /*
176  * Example usage:
177  *  perf stat -e 'hv_24x7/domain=2,offset=8,vcpu=0,lpar=0xffffffff/'
178  */
179 
180 /* u3 0-6, one of HV_24X7_PERF_DOMAIN */
181 EVENT_DEFINE_RANGE_FORMAT(domain, config, 0, 3);
182 /* u16 */
183 EVENT_DEFINE_RANGE_FORMAT(core, config, 16, 31);
184 EVENT_DEFINE_RANGE_FORMAT(chip, config, 16, 31);
185 EVENT_DEFINE_RANGE_FORMAT(vcpu, config, 16, 31);
186 /* u32, see "data_offset" */
187 EVENT_DEFINE_RANGE_FORMAT(offset, config, 32, 63);
188 /* u16 */
189 EVENT_DEFINE_RANGE_FORMAT(lpar, config1, 0, 15);
190 
191 EVENT_DEFINE_RANGE(reserved1, config,   4, 15);
192 EVENT_DEFINE_RANGE(reserved2, config1, 16, 63);
193 EVENT_DEFINE_RANGE(reserved3, config2,  0, 63);
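/*
 * Resulting layout of the perf_event_attr config words, from the ranges
 * defined above:
 *   config:  bits  0-3 domain, 4-15 reserved1, 16-31 core/chip/vcpu index,
 *            bits 32-63 offset
 *   config1: bits  0-15 lpar, 16-63 reserved2
 *   config2: bits  0-63 reserved3
 */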
194 
195 static struct attribute *format_attrs[] = {
196 	&format_attr_domain.attr,
197 	&format_attr_offset.attr,
198 	&format_attr_core.attr,
199 	&format_attr_chip.attr,
200 	&format_attr_vcpu.attr,
201 	&format_attr_lpar.attr,
202 	NULL,
203 };
204 
205 static struct attribute_group format_group = {
206 	.name = "format",
207 	.attrs = format_attrs,
208 };
209 
210 static struct attribute_group event_group = {
211 	.name = "events",
212 	/* .attrs is set in init */
213 };
214 
215 static struct attribute_group event_desc_group = {
216 	.name = "event_descs",
217 	/* .attrs is set in init */
218 };
219 
220 static struct attribute_group event_long_desc_group = {
221 	.name = "event_long_descs",
222 	/* .attrs is set in init */
223 };
224 
225 static struct kmem_cache *hv_page_cache;
226 
227 DEFINE_PER_CPU(int, hv_24x7_txn_flags);
228 DEFINE_PER_CPU(int, hv_24x7_txn_err);
229 
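/*
 * Per-CPU map from a queued request's index (which the hypervisor echoes back
 * as result_ix) to the perf_event it was queued for; used by the READ
 * transaction path below. 255 is the maximum number of requests per hcall.
 */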
230 struct hv_24x7_hw {
231 	struct perf_event *events[255];
232 };
233 
234 DEFINE_PER_CPU(struct hv_24x7_hw, hv_24x7_hw);
235 
236 /*
237  * request_buffer and result_buffer are not required to be 4k aligned,
238  * but are not allowed to cross any 4k boundary. Aligning them to 4k is
239  * the simplest way to ensure that.
240  */
241 #define H24x7_DATA_BUFFER_SIZE	4096
242 DEFINE_PER_CPU(char, hv_24x7_reqb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096);
243 DEFINE_PER_CPU(char, hv_24x7_resb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096);
244 
245 static unsigned int max_num_requests(int interface_version)
246 {
247 	return (H24x7_DATA_BUFFER_SIZE - sizeof(struct hv_24x7_request_buffer))
248 		/ H24x7_REQUEST_SIZE(interface_version);
249 }
250 
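/*
 * The variable-length tail of struct hv_24x7_event_data (ev->remainder),
 * as parsed by the helpers below, holds in order: the event name, a 2-byte
 * description length followed by the description, and a 2-byte long
 * description length followed by the long description. Each length value
 * includes its own 2-byte length field, hence the "- 2" adjustments.
 */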
251 static char *event_name(struct hv_24x7_event_data *ev, int *len)
252 {
253 	*len = be16_to_cpu(ev->event_name_len) - 2;
254 	return (char *)ev->remainder;
255 }
256 
257 static char *event_desc(struct hv_24x7_event_data *ev, int *len)
258 {
259 	unsigned nl = be16_to_cpu(ev->event_name_len);
260 	__be16 *desc_len = (__be16 *)(ev->remainder + nl - 2);
261 
262 	*len = be16_to_cpu(*desc_len) - 2;
263 	return (char *)ev->remainder + nl;
264 }
265 
266 static char *event_long_desc(struct hv_24x7_event_data *ev, int *len)
267 {
268 	unsigned nl = be16_to_cpu(ev->event_name_len);
269 	__be16 *desc_len_ = (__be16 *)(ev->remainder + nl - 2);
270 	unsigned desc_len = be16_to_cpu(*desc_len_);
271 	__be16 *long_desc_len = (__be16 *)(ev->remainder + nl + desc_len - 2);
272 
273 	*len = be16_to_cpu(*long_desc_len) - 2;
274 	return (char *)ev->remainder + nl + desc_len;
275 }
276 
277 static bool event_fixed_portion_is_within(struct hv_24x7_event_data *ev,
278 					  void *end)
279 {
280 	void *start = ev;
281 
282 	return (start + offsetof(struct hv_24x7_event_data, remainder)) < end;
283 }
284 
285 /*
286  * Things we don't check:
287  *  - padding for desc, name, and long/detailed desc is required to be '\0'
288  *    bytes.
289  *
290  *  Return NULL if we pass end;
291  *  otherwise return the address of the byte just following the event.
292  */
293 static void *event_end(struct hv_24x7_event_data *ev, void *end)
294 {
295 	void *start = ev;
296 	__be16 *dl_, *ldl_;
297 	unsigned dl, ldl;
298 	unsigned nl = be16_to_cpu(ev->event_name_len);
299 
300 	if (nl < 2) {
301 		pr_debug("%s: name length too short: %d", __func__, nl);
302 		return NULL;
303 	}
304 
305 	if (start + nl > end) {
306 		pr_debug("%s: start=%p + nl=%u > end=%p",
307 				__func__, start, nl, end);
308 		return NULL;
309 	}
310 
311 	dl_ = (__be16 *)(ev->remainder + nl - 2);
312 	if (!IS_ALIGNED((uintptr_t)dl_, 2))
313 		pr_warn("desc len not aligned %p", dl_);
314 	dl = be16_to_cpu(*dl_);
315 	if (dl < 2) {
316 		pr_debug("%s: desc len too short: %d", __func__, dl);
317 		return NULL;
318 	}
319 
320 	if (start + nl + dl > end) {
321 		pr_debug("%s: (start=%p + nl=%u + dl=%u)=%p > end=%p",
322 				__func__, start, nl, dl, start + nl + dl, end);
323 		return NULL;
324 	}
325 
326 	ldl_ = (__be16 *)(ev->remainder + nl + dl - 2);
327 	if (!IS_ALIGNED((uintptr_t)ldl_, 2))
328 		pr_warn("long desc len not aligned %p", ldl_);
329 	ldl = be16_to_cpu(*ldl_);
330 	if (ldl < 2) {
331 		pr_debug("%s: long desc len too short (ldl=%u)",
332 				__func__, ldl);
333 		return NULL;
334 	}
335 
336 	if (start + nl + dl + ldl > end) {
337 		pr_debug("%s: start=%p + nl=%u + dl=%u + ldl=%u > end=%p",
338 				__func__, start, nl, dl, ldl, end);
339 		return NULL;
340 	}
341 
342 	return start + nl + dl + ldl;
343 }
344 
345 static long h_get_24x7_catalog_page_(unsigned long phys_4096,
346 				     unsigned long version, unsigned long index)
347 {
348 	pr_devel("h_get_24x7_catalog_page(0x%lx, %lu, %lu)",
349 			phys_4096, version, index);
350 
351 	WARN_ON(!IS_ALIGNED(phys_4096, 4096));
352 
353 	return plpar_hcall_norets(H_GET_24X7_CATALOG_PAGE,
354 			phys_4096, version, index);
355 }
356 
357 static long h_get_24x7_catalog_page(char page[], u64 version, u32 index)
358 {
359 	return h_get_24x7_catalog_page_(virt_to_phys(page),
360 					version, index);
361 }
362 
363 /*
364  * Each event we find in the catalog will have a sysfs entry. Format the
365  * data for this sysfs entry based on the event's domain.
366  *
367  * Events belonging to the Chip domain can only be monitored in that domain.
368  * i.e. the domain for these events is a fixed/known value.
369  *
370  * Events belonging to the Core domain can be monitored either in the physical
371  * core or in one of the virtual CPU domains. So the domain value for these
372  * events must be specified by the user (i.e is a required parameter). Format
373  * the Core events with 'domain=?' so the perf tool can error-check required
374  * parameters.
375  *
376  * NOTE: For the Core domain events, rather than making domain a required
377  *	 parameter we could default it to PHYS_CORE and allow users to
378  *	 override the domain to one of the VCPU domains.
379  *
380  *	 However, this can make the interface a little inconsistent.
381  *
382  *	 If we set domain=2 (PHYS_CORE) and allow the user to override this
383  *	 field, the user may be tempted to also modify the "offset=x" field,
384  *	 which can lead to confusing usage. Consider the HPM_PCYC (offset=0x18) and
385  *	 HPM_INST (offset=0x20) events. With:
386  *
387  *		perf stat -e hv_24x7/HPM_PCYC,offset=0x20/
388  *
389  *	we end up monitoring HPM_INST, while the command line has HPM_PCYC.
390  *
391  *	By not assigning a default value to the domain for the Core events,
392  *	we can have simple guidelines:
393  *
394  *		- Specifying values for parameters with "=?" is required.
395  *
396  *		- Specifying (i.e. overriding) values for other parameters
397  *		  is undefined.
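 *
 *	 With the logic below, the generated sysfs strings therefore look
 *	 like (the offset comes from the event's counter and group-record
 *	 offsets in the catalog entry):
 *
 *		Chip events:  domain=N,offset=0x<offs>,chip=?,lpar=0x0
 *		Core events:  domain=?,offset=0x<offs>,core=?,lpar=0x0
 *		Other events: domain=?,offset=0x<offs>,vcpu=?,lpar=?
 *
 *	 where N is the fixed PHYS_CHIP domain number.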
398  */
399 static char *event_fmt(struct hv_24x7_event_data *event, unsigned domain)
400 {
401 	const char *sindex;
402 	const char *lpar;
403 	const char *domain_str;
404 	char buf[8];
405 
406 	switch (domain) {
407 	case HV_PERF_DOMAIN_PHYS_CHIP:
408 		snprintf(buf, sizeof(buf), "%d", domain);
409 		domain_str = buf;
410 		lpar = "0x0";
411 		sindex = "chip";
412 		break;
413 	case HV_PERF_DOMAIN_PHYS_CORE:
414 		domain_str = "?";
415 		lpar = "0x0";
416 		sindex = "core";
417 		break;
418 	default:
419 		domain_str = "?";
420 		lpar = "?";
421 		sindex = "vcpu";
422 	}
423 
424 	return kasprintf(GFP_KERNEL,
425 			"domain=%s,offset=0x%x,%s=?,lpar=%s",
426 			domain_str,
427 			be16_to_cpu(event->event_counter_offs) +
428 				be16_to_cpu(event->event_group_record_offs),
429 			sindex,
430 			lpar);
431 }
432 
433 /* Avoid trusting fw to NUL terminate strings */
434 static char *memdup_to_str(char *maybe_str, int max_len, gfp_t gfp)
435 {
436 	return kasprintf(gfp, "%.*s", max_len, maybe_str);
437 }
438 
439 static ssize_t device_show_string(struct device *dev,
440 		struct device_attribute *attr, char *buf)
441 {
442 	struct dev_ext_attribute *d;
443 
444 	d = container_of(attr, struct dev_ext_attribute, attr);
445 
446 	return sprintf(buf, "%s\n", (char *)d->var);
447 }
448 
449 static ssize_t sockets_show(struct device *dev,
450 			    struct device_attribute *attr, char *buf)
451 {
452 	return sprintf(buf, "%d\n", phys_sockets);
453 }
454 
455 static ssize_t chipspersocket_show(struct device *dev,
456 				   struct device_attribute *attr, char *buf)
457 {
458 	return sprintf(buf, "%d\n", phys_chipspersocket);
459 }
460 
461 static ssize_t coresperchip_show(struct device *dev,
462 				 struct device_attribute *attr, char *buf)
463 {
464 	return sprintf(buf, "%d\n", phys_coresperchip);
465 }
466 
467 static struct attribute *device_str_attr_create_(char *name, char *str)
468 {
469 	struct dev_ext_attribute *attr = kzalloc(sizeof(*attr), GFP_KERNEL);
470 
471 	if (!attr)
472 		return NULL;
473 
474 	sysfs_attr_init(&attr->attr.attr);
475 
476 	attr->var = str;
477 	attr->attr.attr.name = name;
478 	attr->attr.attr.mode = 0444;
479 	attr->attr.show = device_show_string;
480 
481 	return &attr->attr.attr;
482 }
483 
484 /*
485  * Allocate and initialize strings representing event attributes.
486  *
487  * NOTE: The strings allocated here are never destroyed and continue to
488  *	 exist until shutdown. This is to allow us to create as many events
489  *	 from the catalog as possible, even if we encounter errors with some.
490  *	 If the error paths change in the future, these may need to be
491  *	 freed by the caller.
492  */
493 static struct attribute *device_str_attr_create(char *name, int name_max,
494 						int name_nonce,
495 						char *str, size_t str_max)
496 {
497 	char *n;
498 	char *s = memdup_to_str(str, str_max, GFP_KERNEL);
499 	struct attribute *a;
500 
501 	if (!s)
502 		return NULL;
503 
504 	if (!name_nonce)
505 		n = kasprintf(GFP_KERNEL, "%.*s", name_max, name);
506 	else
507 		n = kasprintf(GFP_KERNEL, "%.*s__%d", name_max, name,
508 					name_nonce);
509 	if (!n)
510 		goto out_s;
511 
512 	a = device_str_attr_create_(n, s);
513 	if (!a)
514 		goto out_n;
515 
516 	return a;
517 out_n:
518 	kfree(n);
519 out_s:
520 	kfree(s);
521 	return NULL;
522 }
523 
524 static struct attribute *event_to_attr(unsigned ix,
525 				       struct hv_24x7_event_data *event,
526 				       unsigned domain,
527 				       int nonce)
528 {
529 	int event_name_len;
530 	char *ev_name, *a_ev_name, *val;
531 	struct attribute *attr;
532 
533 	if (!domain_is_valid(domain)) {
534 		pr_warn("catalog event %u has invalid domain %u\n",
535 				ix, domain);
536 		return NULL;
537 	}
538 
539 	val = event_fmt(event, domain);
540 	if (!val)
541 		return NULL;
542 
543 	ev_name = event_name(event, &event_name_len);
544 	if (!nonce)
545 		a_ev_name = kasprintf(GFP_KERNEL, "%.*s",
546 				(int)event_name_len, ev_name);
547 	else
548 		a_ev_name = kasprintf(GFP_KERNEL, "%.*s__%d",
549 				(int)event_name_len, ev_name, nonce);
550 
551 	if (!a_ev_name)
552 		goto out_val;
553 
554 	attr = device_str_attr_create_(a_ev_name, val);
555 	if (!attr)
556 		goto out_name;
557 
558 	return attr;
559 out_name:
560 	kfree(a_ev_name);
561 out_val:
562 	kfree(val);
563 	return NULL;
564 }
565 
566 static struct attribute *event_to_desc_attr(struct hv_24x7_event_data *event,
567 					    int nonce)
568 {
569 	int nl, dl;
570 	char *name = event_name(event, &nl);
571 	char *desc = event_desc(event, &dl);
572 
573 	/* If there isn't a description, don't create the sysfs file */
574 	if (!dl)
575 		return NULL;
576 
577 	return device_str_attr_create(name, nl, nonce, desc, dl);
578 }
579 
580 static struct attribute *
581 event_to_long_desc_attr(struct hv_24x7_event_data *event, int nonce)
582 {
583 	int nl, dl;
584 	char *name = event_name(event, &nl);
585 	char *desc = event_long_desc(event, &dl);
586 
587 	/* If there isn't a description, don't create the sysfs file */
588 	if (!dl)
589 		return NULL;
590 
591 	return device_str_attr_create(name, nl, nonce, desc, dl);
592 }
593 
594 static int event_data_to_attrs(unsigned ix, struct attribute **attrs,
595 				   struct hv_24x7_event_data *event, int nonce)
596 {
597 	*attrs = event_to_attr(ix, event, event->domain, nonce);
598 	if (!*attrs)
599 		return -1;
600 
601 	return 0;
602 }
603 
604 /* */
605 struct event_uniq {
606 	struct rb_node node;
607 	const char *name;
608 	int nl;
609 	unsigned ct;
610 	unsigned domain;
611 };
612 
613 static int memord(const void *d1, size_t s1, const void *d2, size_t s2)
614 {
615 	if (s1 < s2)
616 		return 1;
617 	if (s1 > s2)
618 		return -1;
619 
620 	return memcmp(d1, d2, s1);
621 }
622 
623 static int ev_uniq_ord(const void *v1, size_t s1, unsigned d1, const void *v2,
624 		       size_t s2, unsigned d2)
625 {
626 	int r = memord(v1, s1, v2, s2);
627 
628 	if (r)
629 		return r;
630 	if (d1 > d2)
631 		return 1;
632 	if (d2 > d1)
633 		return -1;
634 	return 0;
635 }
636 
637 static int event_uniq_add(struct rb_root *root, const char *name, int nl,
638 			  unsigned domain)
639 {
640 	struct rb_node **new = &(root->rb_node), *parent = NULL;
641 	struct event_uniq *data;
642 
643 	/* Figure out where to put new node */
644 	while (*new) {
645 		struct event_uniq *it;
646 		int result;
647 
648 		it = rb_entry(*new, struct event_uniq, node);
649 		result = ev_uniq_ord(name, nl, domain, it->name, it->nl,
650 					it->domain);
651 
652 		parent = *new;
653 		if (result < 0)
654 			new = &((*new)->rb_left);
655 		else if (result > 0)
656 			new = &((*new)->rb_right);
657 		else {
658 			it->ct++;
659 			pr_info("found a duplicate event %.*s, ct=%u\n", nl,
660 						name, it->ct);
661 			return it->ct;
662 		}
663 	}
664 
665 	data = kmalloc(sizeof(*data), GFP_KERNEL);
666 	if (!data)
667 		return -ENOMEM;
668 
669 	*data = (struct event_uniq) {
670 		.name = name,
671 		.nl = nl,
672 		.ct = 0,
673 		.domain = domain,
674 	};
675 
676 	/* Add new node and rebalance tree. */
677 	rb_link_node(&data->node, parent, new);
678 	rb_insert_color(&data->node, root);
679 
680 	/* data->ct */
681 	return 0;
682 }
683 
684 static void event_uniq_destroy(struct rb_root *root)
685 {
686 	/*
687 	 * the strings we point to are in the giant block of memory filled by
688 	 * the catalog, and are freed separately.
689 	 */
690 	struct event_uniq *pos, *n;
691 
692 	rbtree_postorder_for_each_entry_safe(pos, n, root, node)
693 		kfree(pos);
694 }
695 
696 
697 /*
698  * ensure the event structure's sizes are self-consistent and don't cause us to
699  * read outside of the event
700  *
701  * On success, return the event length in bytes.
702  * Otherwise, return -1 (and print as appropriate).
703  */
704 static ssize_t catalog_event_len_validate(struct hv_24x7_event_data *event,
705 					  size_t event_idx,
706 					  size_t event_data_bytes,
707 					  size_t event_entry_count,
708 					  size_t offset, void *end)
709 {
710 	ssize_t ev_len;
711 	void *ev_end, *calc_ev_end;
712 
713 	if (offset >= event_data_bytes)
714 		return -1;
715 
716 	if (event_idx >= event_entry_count) {
717 		pr_devel("catalog event data has %zu bytes of padding after last event\n",
718 				event_data_bytes - offset);
719 		return -1;
720 	}
721 
722 	if (!event_fixed_portion_is_within(event, end)) {
723 		pr_warn("event %zu fixed portion is not within range\n",
724 				event_idx);
725 		return -1;
726 	}
727 
728 	ev_len = be16_to_cpu(event->length);
729 
730 	if (ev_len % 16)
731 		pr_info("event %zu has length %zu not divisible by 16: event=%pK\n",
732 				event_idx, ev_len, event);
733 
734 	ev_end = (__u8 *)event + ev_len;
735 	if (ev_end > end) {
736 		pr_warn("event %zu has .length=%zu, ends after buffer end: ev_end=%pK > end=%pK, offset=%zu\n",
737 				event_idx, ev_len, ev_end, end,
738 				offset);
739 		return -1;
740 	}
741 
742 	calc_ev_end = event_end(event, end);
743 	if (!calc_ev_end) {
744 		pr_warn("event %zu has a calculated length which exceeds buffer length %zu: event=%pK end=%pK, offset=%zu\n",
745 			event_idx, event_data_bytes, event, end,
746 			offset);
747 		return -1;
748 	}
749 
750 	if (calc_ev_end > ev_end) {
751 		pr_warn("event %zu exceeds its own length: event=%pK, end=%pK, offset=%zu, calc_ev_end=%pK\n",
752 			event_idx, event, ev_end, offset, calc_ev_end);
753 		return -1;
754 	}
755 
756 	return ev_len;
757 }
758 
759 #define MAX_4K (SIZE_MAX / 4096)
760 
761 static int create_events_from_catalog(struct attribute ***events_,
762 				      struct attribute ***event_descs_,
763 				      struct attribute ***event_long_descs_)
764 {
765 	long hret;
766 	size_t catalog_len, catalog_page_len, event_entry_count,
767 	       event_data_len, event_data_offs,
768 	       event_data_bytes, junk_events, event_idx, event_attr_ct, i,
769 	       attr_max, event_idx_last, desc_ct, long_desc_ct;
770 	ssize_t ct, ev_len;
771 	uint64_t catalog_version_num;
772 	struct attribute **events, **event_descs, **event_long_descs;
773 	struct hv_24x7_catalog_page_0 *page_0 =
774 		kmem_cache_alloc(hv_page_cache, GFP_KERNEL);
775 	void *page = page_0;
776 	void *event_data, *end;
777 	struct hv_24x7_event_data *event;
778 	struct rb_root ev_uniq = RB_ROOT;
779 	int ret = 0;
780 
781 	if (!page) {
782 		ret = -ENOMEM;
783 		goto e_out;
784 	}
785 
786 	hret = h_get_24x7_catalog_page(page, 0, 0);
787 	if (hret) {
788 		ret = -EIO;
789 		goto e_free;
790 	}
791 
792 	catalog_version_num = be64_to_cpu(page_0->version);
793 	catalog_page_len = be32_to_cpu(page_0->length);
794 
795 	if (MAX_4K < catalog_page_len) {
796 		pr_err("invalid page count: %zu\n", catalog_page_len);
797 		ret = -EIO;
798 		goto e_free;
799 	}
800 
801 	catalog_len = catalog_page_len * 4096;
802 
803 	event_entry_count = be16_to_cpu(page_0->event_entry_count);
804 	event_data_offs   = be16_to_cpu(page_0->event_data_offs);
805 	event_data_len    = be16_to_cpu(page_0->event_data_len);
806 
807 	pr_devel("cv %llu cl %zu eec %zu edo %zu edl %zu\n",
808 			catalog_version_num, catalog_len,
809 			event_entry_count, event_data_offs, event_data_len);
810 
811 	if ((MAX_4K < event_data_len)
812 			|| (MAX_4K < event_data_offs)
813 			|| (MAX_4K - event_data_offs < event_data_len)) {
814 		pr_err("invalid event data offs %zu and/or len %zu\n",
815 				event_data_offs, event_data_len);
816 		ret = -EIO;
817 		goto e_free;
818 	}
819 
820 	if ((event_data_offs + event_data_len) > catalog_page_len) {
821 		pr_err("event data %zu-%zu does not fit inside catalog 0-%zu\n",
822 				event_data_offs,
823 				event_data_offs + event_data_len,
824 				catalog_page_len);
825 		ret = -EIO;
826 		goto e_free;
827 	}
828 
829 	if (SIZE_MAX - 1 < event_entry_count) {
830 		pr_err("event_entry_count %zu is invalid\n", event_entry_count);
831 		ret = -EIO;
832 		goto e_free;
833 	}
834 
835 	event_data_bytes = event_data_len * 4096;
836 
837 	/*
838 	 * event data can span several pages, and events can cross between
839 	 * these pages. Use vmalloc to make this easier.
840 	 */
841 	event_data = vmalloc(event_data_bytes);
842 	if (!event_data) {
843 		pr_err("could not allocate event data\n");
844 		ret = -ENOMEM;
845 		goto e_free;
846 	}
847 
848 	end = event_data + event_data_bytes;
849 
850 	/*
851 	 * using vmalloc_to_phys() like this only works if PAGE_SIZE is
852 	 * divisible by 4096
853 	 */
854 	BUILD_BUG_ON(PAGE_SIZE % 4096);
855 
856 	for (i = 0; i < event_data_len; i++) {
857 		hret = h_get_24x7_catalog_page_(
858 				vmalloc_to_phys(event_data + i * 4096),
859 				catalog_version_num,
860 				i + event_data_offs);
861 		if (hret) {
862 			pr_err("Failed to get event data in page %zu: rc=%ld\n",
863 			       i + event_data_offs, hret);
864 			ret = -EIO;
865 			goto e_event_data;
866 		}
867 	}
868 
869 	/*
870 	 * scan the catalog to determine the number of attributes we need, and
871 	 * verify it at the same time.
872 	 */
873 	for (junk_events = 0, event = event_data, event_idx = 0, attr_max = 0;
874 	     ;
875 	     event_idx++, event = (void *)event + ev_len) {
876 		size_t offset = (void *)event - (void *)event_data;
877 		char *name;
878 		int nl;
879 
880 		ev_len = catalog_event_len_validate(event, event_idx,
881 						    event_data_bytes,
882 						    event_entry_count,
883 						    offset, end);
884 		if (ev_len < 0)
885 			break;
886 
887 		name = event_name(event, &nl);
888 
889 		if (event->event_group_record_len == 0) {
890 			pr_devel("invalid event %zu (%.*s): group_record_len == 0, skipping\n",
891 					event_idx, nl, name);
892 			junk_events++;
893 			continue;
894 		}
895 
896 		if (!catalog_entry_domain_is_valid(event->domain)) {
897 			pr_info("event %zu (%.*s) has invalid domain %d\n",
898 					event_idx, nl, name, event->domain);
899 			junk_events++;
900 			continue;
901 		}
902 
903 		attr_max++;
904 	}
905 
906 	event_idx_last = event_idx;
907 	if (event_idx_last != event_entry_count)
908 		pr_warn("event buffer ended before listed # of events were parsed (got %zu, wanted %zu, junk %zu)\n",
909 				event_idx_last, event_entry_count, junk_events);
910 
911 	events = kmalloc_array(attr_max + 1, sizeof(*events), GFP_KERNEL);
912 	if (!events) {
913 		ret = -ENOMEM;
914 		goto e_event_data;
915 	}
916 
917 	event_descs = kmalloc_array(event_idx + 1, sizeof(*event_descs),
918 				GFP_KERNEL);
919 	if (!event_descs) {
920 		ret = -ENOMEM;
921 		goto e_event_attrs;
922 	}
923 
924 	event_long_descs = kmalloc_array(event_idx + 1,
925 			sizeof(*event_long_descs), GFP_KERNEL);
926 	if (!event_long_descs) {
927 		ret = -ENOMEM;
928 		goto e_event_descs;
929 	}
930 
931 	/* Iterate over the catalog filling in the attribute vector */
932 	for (junk_events = 0, event_attr_ct = 0, desc_ct = 0, long_desc_ct = 0,
933 				event = event_data, event_idx = 0;
934 			event_idx < event_idx_last;
935 			event_idx++, ev_len = be16_to_cpu(event->length),
936 				event = (void *)event + ev_len) {
937 		char *name;
938 		int nl;
939 		int nonce;
940 		/*
941 		 * these are the only "bad" events that are intermixed and that
942 		 * we can ignore without issue. Make sure to skip them here.
943 		 */
944 		if (event->event_group_record_len == 0)
945 			continue;
946 		if (!catalog_entry_domain_is_valid(event->domain))
947 			continue;
948 
949 		name  = event_name(event, &nl);
950 		nonce = event_uniq_add(&ev_uniq, name, nl, event->domain);
951 		ct    = event_data_to_attrs(event_idx, events + event_attr_ct,
952 					    event, nonce);
953 		if (ct < 0) {
954 			pr_warn("event %zu (%.*s) creation failure, skipping\n",
955 				event_idx, nl, name);
956 			junk_events++;
957 		} else {
958 			event_attr_ct++;
959 			event_descs[desc_ct] = event_to_desc_attr(event, nonce);
960 			if (event_descs[desc_ct])
961 				desc_ct++;
962 			event_long_descs[long_desc_ct] =
963 					event_to_long_desc_attr(event, nonce);
964 			if (event_long_descs[long_desc_ct])
965 				long_desc_ct++;
966 		}
967 	}
968 
969 	pr_info("read %zu catalog entries, created %zu event attrs (%zu failures), %zu descs\n",
970 			event_idx, event_attr_ct, junk_events, desc_ct);
971 
972 	events[event_attr_ct] = NULL;
973 	event_descs[desc_ct] = NULL;
974 	event_long_descs[long_desc_ct] = NULL;
975 
976 	event_uniq_destroy(&ev_uniq);
977 	vfree(event_data);
978 	kmem_cache_free(hv_page_cache, page);
979 
980 	*events_ = events;
981 	*event_descs_ = event_descs;
982 	*event_long_descs_ = event_long_descs;
983 	return 0;
984 
985 e_event_descs:
986 	kfree(event_descs);
987 e_event_attrs:
988 	kfree(events);
989 e_event_data:
990 	vfree(event_data);
991 e_free:
992 	kmem_cache_free(hv_page_cache, page);
993 e_out:
994 	*events_ = NULL;
995 	*event_descs_ = NULL;
996 	*event_long_descs_ = NULL;
997 	return ret;
998 }
999 
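/*
 * sysfs binary attribute read handler for the raw catalog. The catalog is
 * fetched from the hypervisor one 4096-byte page at a time, so a single call
 * returns at most the rest of the page containing @offset (capped at @count).
 */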
1000 static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
1001 			    struct bin_attribute *bin_attr, char *buf,
1002 			    loff_t offset, size_t count)
1003 {
1004 	long hret;
1005 	ssize_t ret = 0;
1006 	size_t catalog_len = 0, catalog_page_len = 0;
1007 	loff_t page_offset = 0;
1008 	loff_t offset_in_page;
1009 	size_t copy_len;
1010 	uint64_t catalog_version_num = 0;
1011 	void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);
1012 	struct hv_24x7_catalog_page_0 *page_0 = page;
1013 
1014 	if (!page)
1015 		return -ENOMEM;
1016 
1017 	hret = h_get_24x7_catalog_page(page, 0, 0);
1018 	if (hret) {
1019 		ret = -EIO;
1020 		goto e_free;
1021 	}
1022 
1023 	catalog_version_num = be64_to_cpu(page_0->version);
1024 	catalog_page_len = be32_to_cpu(page_0->length);
1025 	catalog_len = catalog_page_len * 4096;
1026 
1027 	page_offset = offset / 4096;
1028 	offset_in_page = offset % 4096;
1029 
1030 	if (page_offset >= catalog_page_len)
1031 		goto e_free;
1032 
1033 	if (page_offset != 0) {
1034 		hret = h_get_24x7_catalog_page(page, catalog_version_num,
1035 					       page_offset);
1036 		if (hret) {
1037 			ret = -EIO;
1038 			goto e_free;
1039 		}
1040 	}
1041 
1042 	copy_len = 4096 - offset_in_page;
1043 	if (copy_len > count)
1044 		copy_len = count;
1045 
1046 	memcpy(buf, page+offset_in_page, copy_len);
1047 	ret = copy_len;
1048 
1049 e_free:
1050 	if (hret)
1051 		pr_err("h_get_24x7_catalog_page(ver=%lld, page=%lld) failed:"
1052 		       " rc=%ld\n",
1053 		       catalog_version_num, page_offset, hret);
1054 	kmem_cache_free(hv_page_cache, page);
1055 
1056 	pr_devel("catalog_read: offset=%lld(%lld) count=%zu "
1057 			"catalog_len=%zu(%zu) => %zd\n", offset, page_offset,
1058 			count, catalog_len, catalog_page_len, ret);
1059 
1060 	return ret;
1061 }
1062 
1063 static ssize_t domains_show(struct device *dev, struct device_attribute *attr,
1064 			    char *page)
1065 {
1066 	int d, n, count = 0;
1067 	const char *str;
1068 
1069 	for (d = 0; d < HV_PERF_DOMAIN_MAX; d++) {
1070 		str = domain_name(d);
1071 		if (!str)
1072 			continue;
1073 
1074 		n = sprintf(page, "%d: %s\n", d, str);
1075 		if (n < 0)
1076 			break;
1077 
1078 		count += n;
1079 		page += n;
1080 	}
1081 	return count;
1082 }
1083 
1084 #define PAGE_0_ATTR(_name, _fmt, _expr)				\
1085 static ssize_t _name##_show(struct device *dev,			\
1086 			    struct device_attribute *dev_attr,	\
1087 			    char *buf)				\
1088 {								\
1089 	long hret;						\
1090 	ssize_t ret = 0;					\
1091 	void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);	\
1092 	struct hv_24x7_catalog_page_0 *page_0 = page;		\
1093 	if (!page)						\
1094 		return -ENOMEM;					\
1095 	hret = h_get_24x7_catalog_page(page, 0, 0);		\
1096 	if (hret) {						\
1097 		ret = -EIO;					\
1098 		goto e_free;					\
1099 	}							\
1100 	ret = sprintf(buf, _fmt, _expr);			\
1101 e_free:								\
1102 	kmem_cache_free(hv_page_cache, page);			\
1103 	return ret;						\
1104 }								\
1105 static DEVICE_ATTR_RO(_name)
1106 
1107 PAGE_0_ATTR(catalog_version, "%lld\n",
1108 		(unsigned long long)be64_to_cpu(page_0->version));
1109 PAGE_0_ATTR(catalog_len, "%lld\n",
1110 		(unsigned long long)be32_to_cpu(page_0->length) * 4096);
1111 static BIN_ATTR_RO(catalog, 0/* real length varies */);
1112 static DEVICE_ATTR_RO(domains);
1113 static DEVICE_ATTR_RO(sockets);
1114 static DEVICE_ATTR_RO(chipspersocket);
1115 static DEVICE_ATTR_RO(coresperchip);
1116 
1117 static struct bin_attribute *if_bin_attrs[] = {
1118 	&bin_attr_catalog,
1119 	NULL,
1120 };
1121 
1122 static struct attribute *if_attrs[] = {
1123 	&dev_attr_catalog_len.attr,
1124 	&dev_attr_catalog_version.attr,
1125 	&dev_attr_domains.attr,
1126 	&dev_attr_sockets.attr,
1127 	&dev_attr_chipspersocket.attr,
1128 	&dev_attr_coresperchip.attr,
1129 	NULL,
1130 };
1131 
1132 static struct attribute_group if_group = {
1133 	.name = "interface",
1134 	.bin_attrs = if_bin_attrs,
1135 	.attrs = if_attrs,
1136 };
1137 
1138 static const struct attribute_group *attr_groups[] = {
1139 	&format_group,
1140 	&event_group,
1141 	&event_desc_group,
1142 	&event_long_desc_group,
1143 	&if_group,
1144 	NULL,
1145 };
1146 
1147 /*
1148  * Start the process for a new H_GET_24x7_DATA hcall.
1149  */
1150 static void init_24x7_request(struct hv_24x7_request_buffer *request_buffer,
1151 			      struct hv_24x7_data_result_buffer *result_buffer)
1152 {
1153 
1154 	memset(request_buffer, 0, H24x7_DATA_BUFFER_SIZE);
1155 	memset(result_buffer, 0, H24x7_DATA_BUFFER_SIZE);
1156 
1157 	request_buffer->interface_version = interface_version;
1158 	/* memset above set request_buffer->num_requests to 0 */
1159 }
1160 
1161 /*
1162  * Commit (i.e. perform) the H_GET_24x7_DATA hcall using the data collected
1163  * by 'init_24x7_request()' and 'add_event_to_24x7_request()'.
1164  */
1165 static int make_24x7_request(struct hv_24x7_request_buffer *request_buffer,
1166 			     struct hv_24x7_data_result_buffer *result_buffer)
1167 {
1168 	long ret;
1169 
1170 	/*
1171 	 * NOTE: Due to variable number of array elements in request and
1172 	 *	 result buffer(s), sizeof() is not reliable. Use the actual
1173 	 *	 allocated buffer size, H24x7_DATA_BUFFER_SIZE.
1174 	 */
1175 	ret = plpar_hcall_norets(H_GET_24X7_DATA,
1176 			virt_to_phys(request_buffer), H24x7_DATA_BUFFER_SIZE,
1177 			virt_to_phys(result_buffer),  H24x7_DATA_BUFFER_SIZE);
1178 
1179 	if (ret) {
1180 		struct hv_24x7_request *req;
1181 
1182 		req = request_buffer->requests;
1183 		pr_notice_ratelimited("hcall failed: [%d %#x %#x %d] => ret 0x%lx (%ld) detail=0x%x failing ix=%x\n",
1184 				      req->performance_domain, req->data_offset,
1185 				      req->starting_ix, req->starting_lpar_ix,
1186 				      ret, ret, result_buffer->detailed_rc,
1187 				      result_buffer->failing_request_ix);
1188 		return -EIO;
1189 	}
1190 
1191 	return 0;
1192 }
1193 
1194 /*
1195  * Add the given @event to the next slot in the 24x7 request_buffer.
1196  *
1197  * Note that H_GET_24X7_DATA hcall allows reading several counters'
1198  * Note that the H_GET_24X7_DATA hcall allows reading several counters'
1199  * request buffer one by one, make the HCALL and process the results.
1200  */
1201 static int add_event_to_24x7_request(struct perf_event *event,
1202 				struct hv_24x7_request_buffer *request_buffer)
1203 {
1204 	u16 idx;
1205 	int i;
1206 	size_t req_size;
1207 	struct hv_24x7_request *req;
1208 
1209 	if (request_buffer->num_requests >=
1210 	    max_num_requests(request_buffer->interface_version)) {
1211 		pr_devel("Too many requests for 24x7 HCALL %d\n",
1212 				request_buffer->num_requests);
1213 		return -EINVAL;
1214 	}
1215 
1216 	switch (event_get_domain(event)) {
1217 	case HV_PERF_DOMAIN_PHYS_CHIP:
1218 		idx = event_get_chip(event);
1219 		break;
1220 	case HV_PERF_DOMAIN_PHYS_CORE:
1221 		idx = event_get_core(event);
1222 		break;
1223 	default:
1224 		idx = event_get_vcpu(event);
1225 	}
1226 
1227 	req_size = H24x7_REQUEST_SIZE(request_buffer->interface_version);
1228 
1229 	i = request_buffer->num_requests++;
1230 	req = (void *) request_buffer->requests + i * req_size;
1231 
1232 	req->performance_domain = event_get_domain(event);
1233 	req->data_size = cpu_to_be16(8);
1234 	req->data_offset = cpu_to_be32(event_get_offset(event));
1235 	req->starting_lpar_ix = cpu_to_be16(event_get_lpar(event));
1236 	req->max_num_lpars = cpu_to_be16(1);
1237 	req->starting_ix = cpu_to_be16(idx);
1238 	req->max_ix = cpu_to_be16(1);
1239 
1240 	if (request_buffer->interface_version > 1) {
1241 		if (domain_needs_aggregation(req->performance_domain))
1242 			req->max_num_thread_groups = -1;
1243 		else if (req->performance_domain != HV_PERF_DOMAIN_PHYS_CHIP) {
1244 			req->starting_thread_group_ix = idx % 2;
1245 			req->max_num_thread_groups = 1;
1246 		}
1247 	}
1248 
1249 	return 0;
1250 }
1251 
1252 /**
1253  * get_count_from_result - get event count from all result elements in result
1254  *
1255  * If the event corresponding to this result needs aggregation of the result
1256  * element values, then this function does that.
1257  *
1258  * @event:	Event associated with @res.
1259  * @resb:	Result buffer containing @res.
1260  * @res:	Result to work on.
1261  * @countp:	Output variable containing the event count.
1262  * @next:	Optional output variable pointing to the next result in @resb.
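 *
 * Each result element in @res->elements is a v1 or v2 element header followed
 * by result_element_data_size bytes of counter data (a single big-endian u64
 * here); for domains that need aggregation, the per-element counts are summed.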
1263  */
1264 static int get_count_from_result(struct perf_event *event,
1265 				 struct hv_24x7_data_result_buffer *resb,
1266 				 struct hv_24x7_result *res, u64 *countp,
1267 				 struct hv_24x7_result **next)
1268 {
1269 	u16 num_elements = be16_to_cpu(res->num_elements_returned);
1270 	u16 data_size = be16_to_cpu(res->result_element_data_size);
1271 	unsigned int data_offset;
1272 	void *element_data;
1273 	int i;
1274 	u64 count;
1275 
1276 	/*
1277 	 * We can bail out early if the result is empty.
1278 	 */
1279 	if (!num_elements) {
1280 		pr_debug("Result of request %hhu is empty, nothing to do\n",
1281 			 res->result_ix);
1282 
1283 		if (next)
1284 			*next = (struct hv_24x7_result *) res->elements;
1285 
1286 		return -ENODATA;
1287 	}
1288 
1289 	/*
1290 	 * Since we always specify 1 as the maximum for the smallest resource
1291 	 * we're requesting, there should be only one element per result,
1292 	 * except when an event needs aggregation, in which case there are more.
1293 	 */
1294 	if (num_elements != 1 &&
1295 	    !domain_needs_aggregation(event_get_domain(event))) {
1296 		pr_err("Error: result of request %hhu has %hu elements\n",
1297 		       res->result_ix, num_elements);
1298 
1299 		return -EIO;
1300 	}
1301 
1302 	if (data_size != sizeof(u64)) {
1303 		pr_debug("Error: result of request %hhu has data of %hu bytes\n",
1304 			 res->result_ix, data_size);
1305 
1306 		return -ENOTSUPP;
1307 	}
1308 
1309 	if (resb->interface_version == 1)
1310 		data_offset = offsetof(struct hv_24x7_result_element_v1,
1311 				       element_data);
1312 	else
1313 		data_offset = offsetof(struct hv_24x7_result_element_v2,
1314 				       element_data);
1315 
1316 	/* Go through the result elements in the result. */
1317 	for (i = count = 0, element_data = res->elements + data_offset;
1318 	     i < num_elements;
1319 	     i++, element_data += data_size + data_offset)
1320 		count += be64_to_cpu(*((u64 *) element_data));
1321 
1322 	*countp = count;
1323 
1324 	/* The next result is after the last result element. */
1325 	if (next)
1326 		*next = element_data - data_offset;
1327 
1328 	return 0;
1329 }
1330 
1331 static int single_24x7_request(struct perf_event *event, u64 *count)
1332 {
1333 	int ret;
1334 	struct hv_24x7_request_buffer *request_buffer;
1335 	struct hv_24x7_data_result_buffer *result_buffer;
1336 
1337 	BUILD_BUG_ON(sizeof(*request_buffer) > 4096);
1338 	BUILD_BUG_ON(sizeof(*result_buffer) > 4096);
1339 
1340 	request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
1341 	result_buffer = (void *)get_cpu_var(hv_24x7_resb);
1342 
1343 	init_24x7_request(request_buffer, result_buffer);
1344 
1345 	ret = add_event_to_24x7_request(event, request_buffer);
1346 	if (ret)
1347 		goto out;
1348 
1349 	ret = make_24x7_request(request_buffer, result_buffer);
1350 	if (ret)
1351 		goto out;
1352 
1353 	/* process result from hcall */
1354 	ret = get_count_from_result(event, result_buffer,
1355 				    result_buffer->results, count, NULL);
1356 
1357 out:
1358 	put_cpu_var(hv_24x7_reqb);
1359 	put_cpu_var(hv_24x7_resb);
1360 	return ret;
1361 }
1362 
1363 
1364 static int h_24x7_event_init(struct perf_event *event)
1365 {
1366 	struct hv_perf_caps caps;
1367 	unsigned domain;
1368 	unsigned long hret;
1369 	u64 ct;
1370 
1371 	/* Not our event */
1372 	if (event->attr.type != event->pmu->type)
1373 		return -ENOENT;
1374 
1375 	/* Unused areas must be 0 */
1376 	if (event_get_reserved1(event) ||
1377 	    event_get_reserved2(event) ||
1378 	    event_get_reserved3(event)) {
1379 		pr_devel("reserved set when forbidden 0x%llx(0x%llx) 0x%llx(0x%llx) 0x%llx(0x%llx)\n",
1380 				event->attr.config,
1381 				event_get_reserved1(event),
1382 				event->attr.config1,
1383 				event_get_reserved2(event),
1384 				event->attr.config2,
1385 				event_get_reserved3(event));
1386 		return -EINVAL;
1387 	}
1388 
1389 	/* no branch sampling */
1390 	if (has_branch_stack(event))
1391 		return -EOPNOTSUPP;
1392 
1393 	/* offset must be 8 byte aligned */
1394 	if (event_get_offset(event) % 8) {
1395 		pr_devel("bad alignment\n");
1396 		return -EINVAL;
1397 	}
1398 
1399 	domain = event_get_domain(event);
1400 	if (domain >= HV_PERF_DOMAIN_MAX) {
1401 		pr_devel("invalid domain %d\n", domain);
1402 		return -EINVAL;
1403 	}
1404 
1405 	hret = hv_perf_caps_get(&caps);
1406 	if (hret) {
1407 		pr_devel("could not get capabilities: rc=%ld\n", hret);
1408 		return -EIO;
1409 	}
1410 
1411 	/* Physical domains & other lpars require extra capabilities */
1412 	if (!caps.collect_privileged && (is_physical_domain(domain) ||
1413 		(event_get_lpar(event) != event_get_lpar_max()))) {
1414 		pr_devel("hv permissions disallow: is_physical_domain:%d, lpar=0x%llx\n",
1415 				is_physical_domain(domain),
1416 				event_get_lpar(event));
1417 		return -EACCES;
1418 	}
1419 
1420 	/* Get the initial value of the counter for this event */
1421 	if (single_24x7_request(event, &ct)) {
1422 		pr_devel("test hcall failed\n");
1423 		return -EIO;
1424 	}
1425 	(void)local64_xchg(&event->hw.prev_count, ct);
1426 
1427 	return 0;
1428 }
1429 
1430 static u64 h_24x7_get_value(struct perf_event *event)
1431 {
1432 	u64 ct;
1433 
1434 	if (single_24x7_request(event, &ct))
1435 		/* We checked this in event init, shouldn't fail here... */
1436 		return 0;
1437 
1438 	return ct;
1439 }
1440 
1441 static void update_event_count(struct perf_event *event, u64 now)
1442 {
1443 	s64 prev;
1444 
1445 	prev = local64_xchg(&event->hw.prev_count, now);
1446 	local64_add(now - prev, &event->count);
1447 }
1448 
1449 static void h_24x7_event_read(struct perf_event *event)
1450 {
1451 	u64 now;
1452 	struct hv_24x7_request_buffer *request_buffer;
1453 	struct hv_24x7_hw *h24x7hw;
1454 	int txn_flags;
1455 
1456 	txn_flags = __this_cpu_read(hv_24x7_txn_flags);
1457 
1458 	/*
1459 	 * If in a READ transaction, add this counter to the list of
1460 	 * counters to read during the next HCALL (i.e. commit_txn()).
1461 	 * If not in a READ transaction, go ahead and make the HCALL
1462 	 * to read this counter by itself.
1463 	 */
1464 
1465 	if (txn_flags & PERF_PMU_TXN_READ) {
1466 		int i;
1467 		int ret;
1468 
1469 		if (__this_cpu_read(hv_24x7_txn_err))
1470 			return;
1471 
1472 		request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
1473 
1474 		ret = add_event_to_24x7_request(event, request_buffer);
1475 		if (ret) {
1476 			__this_cpu_write(hv_24x7_txn_err, ret);
1477 		} else {
1478 			/*
1479 			 * Associate the event with the HCALL request index,
1480 			 * so ->commit_txn() can quickly find/update count.
1481 			 */
1482 			i = request_buffer->num_requests - 1;
1483 
1484 			h24x7hw = &get_cpu_var(hv_24x7_hw);
1485 			h24x7hw->events[i] = event;
1486 			put_cpu_var(h24x7hw);
1487 		}
1488 
1489 		put_cpu_var(hv_24x7_reqb);
1490 	} else {
1491 		now = h_24x7_get_value(event);
1492 		update_event_count(event, now);
1493 	}
1494 }
1495 
1496 static void h_24x7_event_start(struct perf_event *event, int flags)
1497 {
1498 	if (flags & PERF_EF_RELOAD)
1499 		local64_set(&event->hw.prev_count, h_24x7_get_value(event));
1500 }
1501 
1502 static void h_24x7_event_stop(struct perf_event *event, int flags)
1503 {
1504 	h_24x7_event_read(event);
1505 }
1506 
1507 static int h_24x7_event_add(struct perf_event *event, int flags)
1508 {
1509 	if (flags & PERF_EF_START)
1510 		h_24x7_event_start(event, flags);
1511 
1512 	return 0;
1513 }
1514 
1515 /*
1516  * 24x7 counters only support READ transactions. They are
1517  * always counting and don't need/support ADD transactions.
1518  * Cache the flags, but otherwise ignore transactions that
1519  * are not PERF_PMU_TXN_READ.
1520  */
1521 static void h_24x7_event_start_txn(struct pmu *pmu, unsigned int flags)
1522 {
1523 	struct hv_24x7_request_buffer *request_buffer;
1524 	struct hv_24x7_data_result_buffer *result_buffer;
1525 
1526 	/* We should not be called if we are already in a txn */
1527 	WARN_ON_ONCE(__this_cpu_read(hv_24x7_txn_flags));
1528 
1529 	__this_cpu_write(hv_24x7_txn_flags, flags);
1530 	if (flags & ~PERF_PMU_TXN_READ)
1531 		return;
1532 
1533 	request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
1534 	result_buffer = (void *)get_cpu_var(hv_24x7_resb);
1535 
1536 	init_24x7_request(request_buffer, result_buffer);
1537 
1538 	put_cpu_var(hv_24x7_resb);
1539 	put_cpu_var(hv_24x7_reqb);
1540 }
1541 
1542 /*
1543  * Clean up transaction state.
1544  *
1545  * NOTE: Ignore state of request and result buffers for now.
1546  *	 We will initialize them during the next read/txn.
1547  */
1548 static void reset_txn(void)
1549 {
1550 	__this_cpu_write(hv_24x7_txn_flags, 0);
1551 	__this_cpu_write(hv_24x7_txn_err, 0);
1552 }
1553 
1554 /*
1555  * 24x7 counters only support READ transactions. They are always counting
1556  * and don't need/support ADD transactions. Clear ->txn_flags but otherwise
1557  * ignore transactions that are not of type PERF_PMU_TXN_READ.
1558  *
1559  * For READ transactions, submit all pending 24x7 requests (i.e. requests
1560  * that were queued by h_24x7_event_read()), to the hypervisor and update
1561  * the event counts.
1562  */
1563 static int h_24x7_event_commit_txn(struct pmu *pmu)
1564 {
1565 	struct hv_24x7_request_buffer *request_buffer;
1566 	struct hv_24x7_data_result_buffer *result_buffer;
1567 	struct hv_24x7_result *res, *next_res;
1568 	u64 count;
1569 	int i, ret, txn_flags;
1570 	struct hv_24x7_hw *h24x7hw;
1571 
1572 	txn_flags = __this_cpu_read(hv_24x7_txn_flags);
1573 	WARN_ON_ONCE(!txn_flags);
1574 
1575 	ret = 0;
1576 	if (txn_flags & ~PERF_PMU_TXN_READ)
1577 		goto out;
1578 
1579 	ret = __this_cpu_read(hv_24x7_txn_err);
1580 	if (ret)
1581 		goto out;
1582 
1583 	request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
1584 	result_buffer = (void *)get_cpu_var(hv_24x7_resb);
1585 
1586 	ret = make_24x7_request(request_buffer, result_buffer);
1587 	if (ret)
1588 		goto put_reqb;
1589 
1590 	h24x7hw = &get_cpu_var(hv_24x7_hw);
1591 
1592 	/* Go through results in the result buffer to update event counts. */
1593 	for (i = 0, res = result_buffer->results;
1594 	     i < result_buffer->num_results; i++, res = next_res) {
1595 		struct perf_event *event = h24x7hw->events[res->result_ix];
1596 
1597 		ret = get_count_from_result(event, result_buffer, res, &count,
1598 					    &next_res);
1599 		if (ret)
1600 			break;
1601 
1602 		update_event_count(event, count);
1603 	}
1604 
1605 	put_cpu_var(hv_24x7_hw);
1606 
1607 put_reqb:
1608 	put_cpu_var(hv_24x7_resb);
1609 	put_cpu_var(hv_24x7_reqb);
1610 out:
1611 	reset_txn();
1612 	return ret;
1613 }
1614 
1615 /*
1616  * 24x7 counters only support READ transactions. They are always counting
1617  * and don't need/support ADD transactions. However, regardless of the type
1618  * of transaction, all we need to do is cleanup, so we don't have to check
1619  * the type of transaction.
1620  */
1621 static void h_24x7_event_cancel_txn(struct pmu *pmu)
1622 {
1623 	WARN_ON_ONCE(!__this_cpu_read(hv_24x7_txn_flags));
1624 	reset_txn();
1625 }
1626 
1627 static struct pmu h_24x7_pmu = {
1628 	.task_ctx_nr = perf_invalid_context,
1629 
1630 	.name = "hv_24x7",
1631 	.attr_groups = attr_groups,
1632 	.event_init  = h_24x7_event_init,
1633 	.add         = h_24x7_event_add,
1634 	.del         = h_24x7_event_stop,
1635 	.start       = h_24x7_event_start,
1636 	.stop        = h_24x7_event_stop,
1637 	.read        = h_24x7_event_read,
1638 	.start_txn   = h_24x7_event_start_txn,
1639 	.commit_txn  = h_24x7_event_commit_txn,
1640 	.cancel_txn  = h_24x7_event_cancel_txn,
1641 	.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
1642 };
1643 
1644 static int hv_24x7_init(void)
1645 {
1646 	int r;
1647 	unsigned long hret;
1648 	struct hv_perf_caps caps;
1649 
1650 	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
1651 		pr_debug("not a virtualized system, not enabling\n");
1652 		return -ENODEV;
1653 	} else if (!cur_cpu_spec->oprofile_cpu_type)
1654 		return -ENODEV;
1655 
1656 	/* POWER8 only supports v1, while POWER9 only supports v2. */
1657 	if (!strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power8"))
1658 		interface_version = 1;
1659 	else {
1660 		interface_version = 2;
1661 
1662 		/* SMT8 in POWER9 needs to aggregate result elements. */
1663 		if (threads_per_core == 8)
1664 			aggregate_result_elements = true;
1665 	}
1666 
1667 	hret = hv_perf_caps_get(&caps);
1668 	if (hret) {
1669 		pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",
1670 				hret);
1671 		return -ENODEV;
1672 	}
1673 
1674 	hv_page_cache = kmem_cache_create("hv-page-4096", 4096, 4096, 0, NULL);
1675 	if (!hv_page_cache)
1676 		return -ENOMEM;
1677 
1678 	/* sampling not supported */
1679 	h_24x7_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
1680 
1681 	r = create_events_from_catalog(&event_group.attrs,
1682 				   &event_desc_group.attrs,
1683 				   &event_long_desc_group.attrs);
1684 
1685 	if (r)
1686 		return r;
1687 
1688 	r = perf_pmu_register(&h_24x7_pmu, h_24x7_pmu.name, -1);
1689 	if (r)
1690 		return r;
1691 
1692 	read_24x7_sys_info();
1693 
1694 	return 0;
1695 }
1696 
1697 device_initcall(hv_24x7_init);
1698