xref: /linux/tools/perf/util/python.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <Python.h>
3 #include <structmember.h>
4 #include <inttypes.h>
5 #include <poll.h>
6 #include <linux/err.h>
7 #include <perf/cpumap.h>
8 #ifdef HAVE_LIBTRACEEVENT
9 #include <traceevent/event-parse.h>
10 #endif
11 #include <perf/mmap.h>
12 #include "evlist.h"
13 #include "evsel.h"
14 #include "event.h"
15 #include "print_binary.h"
16 #include "thread_map.h"
17 #include "trace-event.h"
18 #include "mmap.h"
19 #include "util/bpf-filter.h"
20 #include "util/env.h"
21 #include "util/kvm-stat.h"
22 #include "util/stat.h"
23 #include "util/kwork.h"
24 #include "util/sample.h"
25 #include "util/lock-contention.h"
26 #include <internal/lib.h>
27 #include "../builtin.h"
28 
29 #if PY_MAJOR_VERSION < 3
30 #define _PyUnicode_FromString(arg) \
31   PyString_FromString(arg)
32 #define _PyUnicode_AsString(arg) \
33   PyString_AsString(arg)
34 #define _PyUnicode_FromFormat(...) \
35   PyString_FromFormat(__VA_ARGS__)
36 #define _PyLong_FromLong(arg) \
37   PyInt_FromLong(arg)
38 
39 #else
40 
41 #define _PyUnicode_FromString(arg) \
42   PyUnicode_FromString(arg)
43 #define _PyUnicode_FromFormat(...) \
44   PyUnicode_FromFormat(__VA_ARGS__)
45 #define _PyLong_FromLong(arg) \
46   PyLong_FromLong(arg)
47 #endif
48 
#ifndef Py_TYPE
/* Compatibility for old Python headers that lack the Py_TYPE() accessor. */
#define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
#endif

/* Define PyVarObject_HEAD_INIT for python 2.5 */
#ifndef PyVarObject_HEAD_INIT
# define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size,
#endif

/* Module entry point; the symbol name is dictated by the import machinery. */
#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC initperf(void);
#else
PyMODINIT_FUNC PyInit_perf(void);
#endif
63 
/*
 * Expose a field of the raw record as a Python attribute: PyMemberDef
 * offsets are relative to the start of the owning object, hence the
 * two-step offsetof() through the embedded union perf_event.
 */
#define member_def(type, member, ptype, help) \
	{ #member, ptype, \
	  offsetof(struct pyrf_event, event) + offsetof(struct type, member), \
	  0, help }

/* Same idea, but for fields of the parsed struct perf_sample. */
#define sample_member_def(name, member, ptype, help) \
	{ #name, ptype, \
	  offsetof(struct pyrf_event, sample) + offsetof(struct perf_sample, member), \
	  0, help }

/* Python object wrapping a single record read from the ring buffer. */
struct pyrf_event {
	PyObject_HEAD
	struct evsel *evsel;		/* evsel the record belongs to; set by read_on_cpu() */
	struct perf_sample sample;	/* parsed sample fields */
	union perf_event   event;	/* raw record copied out of the mmap */
};

/*
 * Attribute-table entries shared by every concrete event type below.
 * NOTE(review): sample_ip's help string says "event type" — looks like a
 * copy-paste slip; the attribute itself is the sample instruction pointer.
 */
#define sample_members \
	sample_member_def(sample_ip, ip, T_ULONGLONG, "event type"),			 \
	sample_member_def(sample_pid, pid, T_INT, "event pid"),			 \
	sample_member_def(sample_tid, tid, T_INT, "event tid"),			 \
	sample_member_def(sample_time, time, T_ULONGLONG, "event timestamp"),		 \
	sample_member_def(sample_addr, addr, T_ULONGLONG, "event addr"),		 \
	sample_member_def(sample_id, id, T_ULONGLONG, "event id"),			 \
	sample_member_def(sample_stream_id, stream_id, T_ULONGLONG, "event stream id"), \
	sample_member_def(sample_period, period, T_ULONGLONG, "event period"),		 \
	sample_member_def(sample_cpu, cpu, T_UINT, "event cpu"),
91 
static char pyrf_mmap_event__doc[] = PyDoc_STR("perf mmap event object.");

/* Attributes exposed on perf.mmap_event (PERF_RECORD_MMAP records). */
static PyMemberDef pyrf_mmap_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_event_header, misc, T_UINT, "event misc"),
	member_def(perf_record_mmap, pid, T_UINT, "event pid"),
	member_def(perf_record_mmap, tid, T_UINT, "event tid"),
	member_def(perf_record_mmap, start, T_ULONGLONG, "start of the map"),
	member_def(perf_record_mmap, len, T_ULONGLONG, "map length"),
	member_def(perf_record_mmap, pgoff, T_ULONGLONG, "page offset"),
	member_def(perf_record_mmap, filename, T_STRING_INPLACE, "backing store"),
	{ .name = NULL, },
};
106 
107 static PyObject *pyrf_mmap_event__repr(struct pyrf_event *pevent)
108 {
109 	PyObject *ret;
110 	char *s;
111 
112 	if (asprintf(&s, "{ type: mmap, pid: %u, tid: %u, start: %#" PRI_lx64 ", "
113 			 "length: %#" PRI_lx64 ", offset: %#" PRI_lx64 ", "
114 			 "filename: %s }",
115 		     pevent->event.mmap.pid, pevent->event.mmap.tid,
116 		     pevent->event.mmap.start, pevent->event.mmap.len,
117 		     pevent->event.mmap.pgoff, pevent->event.mmap.filename) < 0) {
118 		ret = PyErr_NoMemory();
119 	} else {
120 		ret = _PyUnicode_FromString(s);
121 		free(s);
122 	}
123 	return ret;
124 }
125 
/* Python type backing perf.mmap_event instances. */
static PyTypeObject pyrf_mmap_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.mmap_event",
	.tp_basicsize	= sizeof(struct pyrf_event),
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_mmap_event__doc,
	.tp_members	= pyrf_mmap_event__members,
	.tp_repr	= (reprfunc)pyrf_mmap_event__repr,
};
135 
static char pyrf_task_event__doc[] = PyDoc_STR("perf task (fork/exit) event object.");

/* Attributes exposed on perf.task_event (PERF_RECORD_FORK/EXIT records). */
static PyMemberDef pyrf_task_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_fork, pid, T_UINT, "event pid"),
	member_def(perf_record_fork, ppid, T_UINT, "event ppid"),
	member_def(perf_record_fork, tid, T_UINT, "event tid"),
	member_def(perf_record_fork, ptid, T_UINT, "event ptid"),
	member_def(perf_record_fork, time, T_ULONGLONG, "timestamp"),
	{ .name = NULL, },
};

/* repr(): fork and exit records share one layout, only header.type differs. */
static PyObject *pyrf_task_event__repr(struct pyrf_event *pevent)
{
	return _PyUnicode_FromFormat("{ type: %s, pid: %u, ppid: %u, tid: %u, "
				   "ptid: %u, time: %" PRI_lu64 "}",
				   pevent->event.header.type == PERF_RECORD_FORK ? "fork" : "exit",
				   pevent->event.fork.pid,
				   pevent->event.fork.ppid,
				   pevent->event.fork.tid,
				   pevent->event.fork.ptid,
				   pevent->event.fork.time);
}

/* Python type backing perf.task_event instances. */
static PyTypeObject pyrf_task_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.task_event",
	.tp_basicsize	= sizeof(struct pyrf_event),
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_task_event__doc,
	.tp_members	= pyrf_task_event__members,
	.tp_repr	= (reprfunc)pyrf_task_event__repr,
};
170 
static char pyrf_comm_event__doc[] = PyDoc_STR("perf comm event object.");

/* Attributes exposed on perf.comm_event (PERF_RECORD_COMM records). */
static PyMemberDef pyrf_comm_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_comm, pid, T_UINT, "event pid"),
	member_def(perf_record_comm, tid, T_UINT, "event tid"),
	member_def(perf_record_comm, comm, T_STRING_INPLACE, "process name"),
	{ .name = NULL, },
};

/* repr(): one-line summary of the comm (task rename) record. */
static PyObject *pyrf_comm_event__repr(struct pyrf_event *pevent)
{
	return _PyUnicode_FromFormat("{ type: comm, pid: %u, tid: %u, comm: %s }",
				   pevent->event.comm.pid,
				   pevent->event.comm.tid,
				   pevent->event.comm.comm);
}

/* Python type backing perf.comm_event instances. */
static PyTypeObject pyrf_comm_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.comm_event",
	.tp_basicsize	= sizeof(struct pyrf_event),
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_comm_event__doc,
	.tp_members	= pyrf_comm_event__members,
	.tp_repr	= (reprfunc)pyrf_comm_event__repr,
};
199 
static char pyrf_throttle_event__doc[] = PyDoc_STR("perf throttle event object.");

/*
 * Attributes for perf.throttle_event; note the offsets here assume
 * struct perf_record_throttle starts with the event header.
 */
static PyMemberDef pyrf_throttle_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_throttle, time, T_ULONGLONG, "timestamp"),
	member_def(perf_record_throttle, id, T_ULONGLONG, "event id"),
	member_def(perf_record_throttle, stream_id, T_ULONGLONG, "event stream id"),
	{ .name = NULL, },
};
210 
211 static PyObject *pyrf_throttle_event__repr(struct pyrf_event *pevent)
212 {
213 	struct perf_record_throttle *te = (struct perf_record_throttle *)(&pevent->event.header + 1);
214 
215 	return _PyUnicode_FromFormat("{ type: %sthrottle, time: %" PRI_lu64 ", id: %" PRI_lu64
216 				   ", stream_id: %" PRI_lu64 " }",
217 				   pevent->event.header.type == PERF_RECORD_THROTTLE ? "" : "un",
218 				   te->time, te->id, te->stream_id);
219 }
220 
/* Python type backing perf.throttle_event instances. */
static PyTypeObject pyrf_throttle_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.throttle_event",
	.tp_basicsize	= sizeof(struct pyrf_event),
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_throttle_event__doc,
	.tp_members	= pyrf_throttle_event__members,
	.tp_repr	= (reprfunc)pyrf_throttle_event__repr,
};
230 
static char pyrf_lost_event__doc[] = PyDoc_STR("perf lost event object.");

/* Attributes exposed on perf.lost_event (PERF_RECORD_LOST records). */
static PyMemberDef pyrf_lost_event__members[] = {
	sample_members
	member_def(perf_record_lost, id, T_ULONGLONG, "event id"),
	member_def(perf_record_lost, lost, T_ULONGLONG, "number of lost events"),
	{ .name = NULL, },
};
239 
240 static PyObject *pyrf_lost_event__repr(struct pyrf_event *pevent)
241 {
242 	PyObject *ret;
243 	char *s;
244 
245 	if (asprintf(&s, "{ type: lost, id: %#" PRI_lx64 ", "
246 			 "lost: %#" PRI_lx64 " }",
247 		     pevent->event.lost.id, pevent->event.lost.lost) < 0) {
248 		ret = PyErr_NoMemory();
249 	} else {
250 		ret = _PyUnicode_FromString(s);
251 		free(s);
252 	}
253 	return ret;
254 }
255 
/* Python type backing perf.lost_event instances. */
static PyTypeObject pyrf_lost_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.lost_event",
	.tp_basicsize	= sizeof(struct pyrf_event),
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_lost_event__doc,
	.tp_members	= pyrf_lost_event__members,
	.tp_repr	= (reprfunc)pyrf_lost_event__repr,
};
265 
static char pyrf_read_event__doc[] = PyDoc_STR("perf read event object.");

/* Attributes exposed on perf.read_event (PERF_RECORD_READ records). */
static PyMemberDef pyrf_read_event__members[] = {
	sample_members
	member_def(perf_record_read, pid, T_UINT, "event pid"),
	member_def(perf_record_read, tid, T_UINT, "event tid"),
	{ .name = NULL, },
};

/* repr(): pid/tid only; the counter values themselves are not exposed yet. */
static PyObject *pyrf_read_event__repr(struct pyrf_event *pevent)
{
	return _PyUnicode_FromFormat("{ type: read, pid: %u, tid: %u }",
				   pevent->event.read.pid,
				   pevent->event.read.tid);
	/*
	 * FIXME: return the array of read values,
	 * making this method useful ;-)
	 */
}

/* Python type backing perf.read_event instances. */
static PyTypeObject pyrf_read_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.read_event",
	.tp_basicsize	= sizeof(struct pyrf_event),
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_read_event__doc,
	.tp_members	= pyrf_read_event__members,
	.tp_repr	= (reprfunc)pyrf_read_event__repr,
};
295 
static char pyrf_sample_event__doc[] = PyDoc_STR("perf sample event object.");

/*
 * Attributes for perf.sample_event come from the parsed perf_sample
 * (sample_members); tracepoint fields are resolved dynamically in
 * tp_getattro below.
 */
static PyMemberDef pyrf_sample_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	{ .name = NULL, },
};

/* repr(): fixed tag only; individual fields are reachable as attributes. */
static PyObject *pyrf_sample_event__repr(struct pyrf_event *pevent)
{
	PyObject *ret;
	char *s;

	if (asprintf(&s, "{ type: sample }") < 0) {
		ret = PyErr_NoMemory();
	} else {
		ret = _PyUnicode_FromString(s);
		free(s);
	}
	return ret;
}
317 
#ifdef HAVE_LIBTRACEEVENT
/* True when the event's evsel is a tracepoint (raw data is tep-decodable). */
static bool is_tracepoint(struct pyrf_event *pevent)
{
	return pevent->evsel->core.attr.type == PERF_TYPE_TRACEPOINT;
}

/*
 * Convert one tracepoint field from the raw sample data into a Python
 * object: strings/byte arrays for array fields, ints for everything else.
 */
static PyObject*
tracepoint_field(struct pyrf_event *pe, struct tep_format_field *field)
{
	struct tep_handle *pevent = field->event->tep;
	void *data = pe->sample.raw_data;
	PyObject *ret = NULL;
	unsigned long long val;
	unsigned int offset, len;

	if (field->flags & TEP_FIELD_IS_ARRAY) {
		offset = field->offset;
		len    = field->size;
		if (field->flags & TEP_FIELD_IS_DYNAMIC) {
			/* Dynamic arrays store a descriptor word:
			 * low 16 bits = offset, high 16 bits = length. */
			val     = tep_read_number(pevent, data + offset, len);
			offset  = val;
			len     = offset >> 16;
			offset &= 0xffff;
			if (tep_field_is_relative(field->flags))
				offset += field->offset + field->size;
		}
		if (field->flags & TEP_FIELD_IS_STRING &&
		    is_printable_array(data + offset, len)) {
			ret = _PyUnicode_FromString((char *)data + offset);
		} else {
			ret = PyByteArray_FromStringAndSize((const char *) data + offset, len);
			/* Not printable after all: demote to a plain byte array
			 * so later lookups skip the string path. */
			field->flags &= ~TEP_FIELD_IS_STRING;
		}
	} else {
		val = tep_read_number(pevent, data + field->offset,
				      field->size);
		if (field->flags & TEP_FIELD_IS_POINTER)
			ret = PyLong_FromUnsignedLong((unsigned long) val);
		else if (field->flags & TEP_FIELD_IS_SIGNED)
			ret = PyLong_FromLong((long) val);
		else
			ret = PyLong_FromUnsignedLong((unsigned long) val);
	}

	return ret;
}

/*
 * Look an attribute name up in the tracepoint's format description and
 * return its decoded value, or NULL if it is not a tracepoint field.
 * The tp_format is resolved lazily from the evsel's config id.
 *
 * NOTE(review): the temporary returned by PyObject_Str() is never
 * Py_DECREF'd, so each lookup leaks one small object — worth confirming
 * and fixing separately.
 */
static PyObject*
get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name)
{
	const char *str = _PyUnicode_AsString(PyObject_Str(attr_name));
	struct evsel *evsel = pevent->evsel;
	struct tep_format_field *field;

	if (!evsel->tp_format) {
		struct tep_event *tp_format;

		tp_format = trace_event__tp_format_id(evsel->core.attr.config);
		if (IS_ERR_OR_NULL(tp_format))
			return NULL;

		evsel->tp_format = tp_format;
	}

	field = tep_find_any_field(evsel->tp_format, str);
	if (!field)
		return NULL;

	return tracepoint_field(pevent, field);
}
#endif /* HAVE_LIBTRACEEVENT */
389 
/*
 * Attribute lookup for perf.sample_event: try tracepoint fields first
 * (when built with libtraceevent), then fall back to the generic member
 * table lookup.
 */
static PyObject*
pyrf_sample_event__getattro(struct pyrf_event *pevent, PyObject *attr_name)
{
	PyObject *obj = NULL;

#ifdef HAVE_LIBTRACEEVENT
	if (is_tracepoint(pevent))
		obj = get_tracepoint_field(pevent, attr_name);
#endif

	return obj ?: PyObject_GenericGetAttr((PyObject *) pevent, attr_name);
}

/* Python type backing perf.sample_event instances. */
static PyTypeObject pyrf_sample_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.sample_event",
	.tp_basicsize	= sizeof(struct pyrf_event),
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_sample_event__doc,
	.tp_members	= pyrf_sample_event__members,
	.tp_repr	= (reprfunc)pyrf_sample_event__repr,
	.tp_getattro	= (getattrofunc) pyrf_sample_event__getattro,
};
413 
static char pyrf_context_switch_event__doc[] = PyDoc_STR("perf context_switch event object.");

/* Attributes for perf.context_switch_event (PERF_RECORD_SWITCH[_CPU_WIDE]). */
static PyMemberDef pyrf_context_switch_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_switch, next_prev_pid, T_UINT, "next/prev pid"),
	member_def(perf_record_switch, next_prev_tid, T_UINT, "next/prev tid"),
	{ .name = NULL, },
};
423 
424 static PyObject *pyrf_context_switch_event__repr(struct pyrf_event *pevent)
425 {
426 	PyObject *ret;
427 	char *s;
428 
429 	if (asprintf(&s, "{ type: context_switch, next_prev_pid: %u, next_prev_tid: %u, switch_out: %u }",
430 		     pevent->event.context_switch.next_prev_pid,
431 		     pevent->event.context_switch.next_prev_tid,
432 		     !!(pevent->event.header.misc & PERF_RECORD_MISC_SWITCH_OUT)) < 0) {
433 		ret = PyErr_NoMemory();
434 	} else {
435 		ret = _PyUnicode_FromString(s);
436 		free(s);
437 	}
438 	return ret;
439 }
440 
/* Python type backing perf.context_switch_event instances. */
static PyTypeObject pyrf_context_switch_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.context_switch_event",
	.tp_basicsize	= sizeof(struct pyrf_event),
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_context_switch_event__doc,
	.tp_members	= pyrf_context_switch_event__members,
	.tp_repr	= (reprfunc)pyrf_context_switch_event__repr,
};
450 
451 static int pyrf_event__setup_types(void)
452 {
453 	int err;
454 	pyrf_mmap_event__type.tp_new =
455 	pyrf_task_event__type.tp_new =
456 	pyrf_comm_event__type.tp_new =
457 	pyrf_lost_event__type.tp_new =
458 	pyrf_read_event__type.tp_new =
459 	pyrf_sample_event__type.tp_new =
460 	pyrf_context_switch_event__type.tp_new =
461 	pyrf_throttle_event__type.tp_new = PyType_GenericNew;
462 	err = PyType_Ready(&pyrf_mmap_event__type);
463 	if (err < 0)
464 		goto out;
465 	err = PyType_Ready(&pyrf_lost_event__type);
466 	if (err < 0)
467 		goto out;
468 	err = PyType_Ready(&pyrf_task_event__type);
469 	if (err < 0)
470 		goto out;
471 	err = PyType_Ready(&pyrf_comm_event__type);
472 	if (err < 0)
473 		goto out;
474 	err = PyType_Ready(&pyrf_throttle_event__type);
475 	if (err < 0)
476 		goto out;
477 	err = PyType_Ready(&pyrf_read_event__type);
478 	if (err < 0)
479 		goto out;
480 	err = PyType_Ready(&pyrf_sample_event__type);
481 	if (err < 0)
482 		goto out;
483 	err = PyType_Ready(&pyrf_context_switch_event__type);
484 	if (err < 0)
485 		goto out;
486 out:
487 	return err;
488 }
489 
/* Map PERF_RECORD_* ids to the Python type used to wrap them. */
static PyTypeObject *pyrf_event__type[] = {
	[PERF_RECORD_MMAP]	 = &pyrf_mmap_event__type,
	[PERF_RECORD_LOST]	 = &pyrf_lost_event__type,
	[PERF_RECORD_COMM]	 = &pyrf_comm_event__type,
	[PERF_RECORD_EXIT]	 = &pyrf_task_event__type,
	[PERF_RECORD_THROTTLE]	 = &pyrf_throttle_event__type,
	[PERF_RECORD_UNTHROTTLE] = &pyrf_throttle_event__type,
	[PERF_RECORD_FORK]	 = &pyrf_task_event__type,
	[PERF_RECORD_READ]	 = &pyrf_read_event__type,
	[PERF_RECORD_SAMPLE]	 = &pyrf_sample_event__type,
	[PERF_RECORD_SWITCH]	 = &pyrf_context_switch_event__type,
	[PERF_RECORD_SWITCH_CPU_WIDE]  = &pyrf_context_switch_event__type,
};
503 
504 static PyObject *pyrf_event__new(union perf_event *event)
505 {
506 	struct pyrf_event *pevent;
507 	PyTypeObject *ptype;
508 
509 	if ((event->header.type < PERF_RECORD_MMAP ||
510 	     event->header.type > PERF_RECORD_SAMPLE) &&
511 	    !(event->header.type == PERF_RECORD_SWITCH ||
512 	      event->header.type == PERF_RECORD_SWITCH_CPU_WIDE))
513 		return NULL;
514 
515 	ptype = pyrf_event__type[event->header.type];
516 	pevent = PyObject_New(struct pyrf_event, ptype);
517 	if (pevent != NULL)
518 		memcpy(&pevent->event, event, event->header.size);
519 	return (PyObject *)pevent;
520 }
521 
/* Python wrapper around a perf_cpu_map; behaves as a read-only sequence. */
struct pyrf_cpu_map {
	PyObject_HEAD

	struct perf_cpu_map *cpus;
};

/* __init__(cpustr=None): parse a cpu list string (NULL means all online). */
static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus,
			      PyObject *args, PyObject *kwargs)
{
	static char *kwlist[] = { "cpustr", NULL };
	char *cpustr = NULL;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|s",
					 kwlist, &cpustr))
		return -1;

	pcpus->cpus = perf_cpu_map__new(cpustr);
	if (pcpus->cpus == NULL)
		return -1;
	return 0;
}

/* Deallocator: drop our reference on the map, then free the object. */
static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus)
{
	perf_cpu_map__put(pcpus->cpus);
	Py_TYPE(pcpus)->tp_free((PyObject*)pcpus);
}

/* len(cpu_map) */
static Py_ssize_t pyrf_cpu_map__length(PyObject *obj)
{
	struct pyrf_cpu_map *pcpus = (void *)obj;

	return perf_cpu_map__nr(pcpus->cpus);
}

/* cpu_map[i] -> int cpu number */
static PyObject *pyrf_cpu_map__item(PyObject *obj, Py_ssize_t i)
{
	struct pyrf_cpu_map *pcpus = (void *)obj;

	if (i >= perf_cpu_map__nr(pcpus->cpus))
		return NULL;

	return Py_BuildValue("i", perf_cpu_map__cpu(pcpus->cpus, i).cpu);
}

static PySequenceMethods pyrf_cpu_map__sequence_methods = {
	.sq_length = pyrf_cpu_map__length,
	.sq_item   = pyrf_cpu_map__item,
};

static char pyrf_cpu_map__doc[] = PyDoc_STR("cpu map object.");

/* Python type backing perf.cpu_map instances. */
static PyTypeObject pyrf_cpu_map__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.cpu_map",
	.tp_basicsize	= sizeof(struct pyrf_cpu_map),
	.tp_dealloc	= (destructor)pyrf_cpu_map__delete,
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_cpu_map__doc,
	.tp_as_sequence	= &pyrf_cpu_map__sequence_methods,
	.tp_init	= (initproc)pyrf_cpu_map__init,
};

static int pyrf_cpu_map__setup_types(void)
{
	pyrf_cpu_map__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_cpu_map__type);
}
590 
/* Python wrapper around a perf_thread_map; behaves as a read-only sequence. */
struct pyrf_thread_map {
	PyObject_HEAD

	struct perf_thread_map *threads;
};

/* __init__(pid=-1, tid=-1, uid=UINT_MAX): build the thread map. */
static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads,
				 PyObject *args, PyObject *kwargs)
{
	static char *kwlist[] = { "pid", "tid", "uid", NULL };
	int pid = -1, tid = -1, uid = UINT_MAX;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iii",
					 kwlist, &pid, &tid, &uid))
		return -1;

	pthreads->threads = thread_map__new(pid, tid, uid);
	if (pthreads->threads == NULL)
		return -1;
	return 0;
}

/* Deallocator: drop our reference on the map, then free the object. */
static void pyrf_thread_map__delete(struct pyrf_thread_map *pthreads)
{
	perf_thread_map__put(pthreads->threads);
	Py_TYPE(pthreads)->tp_free((PyObject*)pthreads);
}

/* len(thread_map) */
static Py_ssize_t pyrf_thread_map__length(PyObject *obj)
{
	struct pyrf_thread_map *pthreads = (void *)obj;

	return perf_thread_map__nr(pthreads->threads);
}

/* thread_map[i] -> int pid */
static PyObject *pyrf_thread_map__item(PyObject *obj, Py_ssize_t i)
{
	struct pyrf_thread_map *pthreads = (void *)obj;

	if (i >= perf_thread_map__nr(pthreads->threads))
		return NULL;

	return Py_BuildValue("i", perf_thread_map__pid(pthreads->threads, i));
}

static PySequenceMethods pyrf_thread_map__sequence_methods = {
	.sq_length = pyrf_thread_map__length,
	.sq_item   = pyrf_thread_map__item,
};

static char pyrf_thread_map__doc[] = PyDoc_STR("thread map object.");

/* Python type backing perf.thread_map instances. */
static PyTypeObject pyrf_thread_map__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.thread_map",
	.tp_basicsize	= sizeof(struct pyrf_thread_map),
	.tp_dealloc	= (destructor)pyrf_thread_map__delete,
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_thread_map__doc,
	.tp_as_sequence	= &pyrf_thread_map__sequence_methods,
	.tp_init	= (initproc)pyrf_thread_map__init,
};

static int pyrf_thread_map__setup_types(void)
{
	pyrf_thread_map__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_thread_map__type);
}
659 
/* Python wrapper embedding a full struct evsel. */
struct pyrf_evsel {
	PyObject_HEAD

	struct evsel evsel;
};

/*
 * __init__: build the perf_event_attr from keyword arguments and
 * initialize the embedded evsel.  Defaults to a hardware cycles counter
 * with TID+PERIOD sampling.
 */
static int pyrf_evsel__init(struct pyrf_evsel *pevsel,
			    PyObject *args, PyObject *kwargs)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
		.sample_type = PERF_SAMPLE_PERIOD | PERF_SAMPLE_TID,
	};
	static char *kwlist[] = {
		"type",
		"config",
		"sample_freq",
		"sample_period",
		"sample_type",
		"read_format",
		"disabled",
		"inherit",
		"pinned",
		"exclusive",
		"exclude_user",
		"exclude_kernel",
		"exclude_hv",
		"exclude_idle",
		"mmap",
		"context_switch",
		"comm",
		"freq",
		"inherit_stat",
		"enable_on_exec",
		"task",
		"watermark",
		"precise_ip",
		"mmap_data",
		"sample_id_all",
		"wakeup_events",
		"bp_type",
		"bp_addr",
		"bp_len",
		 NULL
	};
	/* Bitfield members of perf_event_attr cannot be passed by address,
	 * so they are parsed into plain ints first and copied below. */
	u64 sample_period = 0;
	u32 disabled = 0,
	    inherit = 0,
	    pinned = 0,
	    exclusive = 0,
	    exclude_user = 0,
	    exclude_kernel = 0,
	    exclude_hv = 0,
	    exclude_idle = 0,
	    mmap = 0,
	    context_switch = 0,
	    comm = 0,
	    freq = 1,
	    inherit_stat = 0,
	    enable_on_exec = 0,
	    task = 0,
	    watermark = 0,
	    precise_ip = 0,
	    mmap_data = 0,
	    sample_id_all = 1;
	int idx = 0;

	/* NOTE(review): &idx is passed after the last format conversion, so
	 * it is never filled by PyArg_ParseTupleAndKeywords — presumably a
	 * leftover; harmless but worth confirming. */
	if (!PyArg_ParseTupleAndKeywords(args, kwargs,
					 "|iKiKKiiiiiiiiiiiiiiiiiiiiiiKK", kwlist,
					 &attr.type, &attr.config, &attr.sample_freq,
					 &sample_period, &attr.sample_type,
					 &attr.read_format, &disabled, &inherit,
					 &pinned, &exclusive, &exclude_user,
					 &exclude_kernel, &exclude_hv, &exclude_idle,
					 &mmap, &context_switch, &comm, &freq, &inherit_stat,
					 &enable_on_exec, &task, &watermark,
					 &precise_ip, &mmap_data, &sample_id_all,
					 &attr.wakeup_events, &attr.bp_type,
					 &attr.bp_addr, &attr.bp_len, &idx))
		return -1;

	/* union... */
	if (sample_period != 0) {
		if (attr.sample_freq != 0)
			return -1; /* FIXME: throw right exception */
		attr.sample_period = sample_period;
	}

	/* Bitfields */
	attr.disabled	    = disabled;
	attr.inherit	    = inherit;
	attr.pinned	    = pinned;
	attr.exclusive	    = exclusive;
	attr.exclude_user   = exclude_user;
	attr.exclude_kernel = exclude_kernel;
	attr.exclude_hv	    = exclude_hv;
	attr.exclude_idle   = exclude_idle;
	attr.mmap	    = mmap;
	attr.context_switch = context_switch;
	attr.comm	    = comm;
	attr.freq	    = freq;
	attr.inherit_stat   = inherit_stat;
	attr.enable_on_exec = enable_on_exec;
	attr.task	    = task;
	attr.watermark	    = watermark;
	attr.precise_ip	    = precise_ip;
	attr.mmap_data	    = mmap_data;
	attr.sample_id_all  = sample_id_all;
	attr.size	    = sizeof(attr);

	evsel__init(&pevsel->evsel, &attr, idx);
	return 0;
}

/* Deallocator: tear down the embedded evsel, then free the object. */
static void pyrf_evsel__delete(struct pyrf_evsel *pevsel)
{
	evsel__exit(&pevsel->evsel);
	Py_TYPE(pevsel)->tp_free((PyObject*)pevsel);
}

/*
 * evsel.open(cpus=None, threads=None, group=0, inherit=0):
 * open the event on the given cpu/thread maps.  Raises OSError on
 * failure; returns None on success.
 */
static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
				  PyObject *args, PyObject *kwargs)
{
	struct evsel *evsel = &pevsel->evsel;
	struct perf_cpu_map *cpus = NULL;
	struct perf_thread_map *threads = NULL;
	PyObject *pcpus = NULL, *pthreads = NULL;
	int group = 0, inherit = 0;
	static char *kwlist[] = { "cpus", "threads", "group", "inherit", NULL };

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist,
					 &pcpus, &pthreads, &group, &inherit))
		return NULL;

	/* NOTE(review): the map arguments are not type-checked before the
	 * casts below — passing the wrong object type would misbehave. */
	if (pthreads != NULL)
		threads = ((struct pyrf_thread_map *)pthreads)->threads;

	if (pcpus != NULL)
		cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;

	evsel->core.attr.inherit = inherit;
	/*
	 * This will group just the fds for this single evsel, to group
	 * multiple events, use evlist.open().
	 */
	if (evsel__open(evsel, cpus, threads) < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	Py_INCREF(Py_None);
	return Py_None;
}

static PyMethodDef pyrf_evsel__methods[] = {
	{
		.ml_name  = "open",
		.ml_meth  = (PyCFunction)pyrf_evsel__open,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("open the event selector file descriptor table.")
	},
	{ .ml_name = NULL, }
};

static char pyrf_evsel__doc[] = PyDoc_STR("perf event selector list object.");

/* Python type backing perf.evsel instances. */
static PyTypeObject pyrf_evsel__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.evsel",
	.tp_basicsize	= sizeof(struct pyrf_evsel),
	.tp_dealloc	= (destructor)pyrf_evsel__delete,
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_evsel__doc,
	.tp_methods	= pyrf_evsel__methods,
	.tp_init	= (initproc)pyrf_evsel__init,
};

static int pyrf_evsel__setup_types(void)
{
	pyrf_evsel__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_evsel__type);
}
843 
/* Python wrapper embedding a full struct evlist. */
struct pyrf_evlist {
	PyObject_HEAD

	struct evlist evlist;
};

/* __init__(cpus, threads): both arguments are required and are assumed
 * (unchecked) to be perf.cpu_map / perf.thread_map instances. */
static int pyrf_evlist__init(struct pyrf_evlist *pevlist,
			     PyObject *args, PyObject *kwargs __maybe_unused)
{
	PyObject *pcpus = NULL, *pthreads = NULL;
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;

	if (!PyArg_ParseTuple(args, "OO", &pcpus, &pthreads))
		return -1;

	threads = ((struct pyrf_thread_map *)pthreads)->threads;
	cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
	evlist__init(&pevlist->evlist, cpus, threads);
	return 0;
}

/* Deallocator: tear down the embedded evlist, then free the object. */
static void pyrf_evlist__delete(struct pyrf_evlist *pevlist)
{
	evlist__exit(&pevlist->evlist);
	Py_TYPE(pevlist)->tp_free((PyObject*)pevlist);
}

/*
 * evlist.mmap(pages=128, overwrite=False): mmap the ring buffers.
 * NOTE(review): "overwrite" is parsed but never used — presumably kept
 * for API compatibility; confirm before relying on it.
 */
static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist,
				   PyObject *args, PyObject *kwargs)
{
	struct evlist *evlist = &pevlist->evlist;
	static char *kwlist[] = { "pages", "overwrite", NULL };
	int pages = 128, overwrite = false;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", kwlist,
					 &pages, &overwrite))
		return NULL;

	if (evlist__mmap(evlist, pages) < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	Py_INCREF(Py_None);
	return Py_None;
}

/* evlist.poll(timeout=-1): poll the fds; returns the number ready. */
static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist,
				   PyObject *args, PyObject *kwargs)
{
	struct evlist *evlist = &pevlist->evlist;
	static char *kwlist[] = { "timeout", NULL };
	int timeout = -1, n;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout))
		return NULL;

	n = evlist__poll(evlist, timeout);
	if (n < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	return Py_BuildValue("i", n);
}
910 
911 static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
912 					 PyObject *args __maybe_unused,
913 					 PyObject *kwargs __maybe_unused)
914 {
915 	struct evlist *evlist = &pevlist->evlist;
916         PyObject *list = PyList_New(0);
917 	int i;
918 
919 	for (i = 0; i < evlist->core.pollfd.nr; ++i) {
920 		PyObject *file;
921 #if PY_MAJOR_VERSION < 3
922 		FILE *fp = fdopen(evlist->core.pollfd.entries[i].fd, "r");
923 
924 		if (fp == NULL)
925 			goto free_list;
926 
927 		file = PyFile_FromFile(fp, "perf", "r", NULL);
928 #else
929 		file = PyFile_FromFd(evlist->core.pollfd.entries[i].fd, "perf", "r", -1,
930 				     NULL, NULL, NULL, 0);
931 #endif
932 		if (file == NULL)
933 			goto free_list;
934 
935 		if (PyList_Append(list, file) != 0) {
936 			Py_DECREF(file);
937 			goto free_list;
938 		}
939 
940 		Py_DECREF(file);
941 	}
942 
943 	return list;
944 free_list:
945 	return PyErr_NoMemory();
946 }
947 
948 
/*
 * evlist.add(evsel): append an event selector to the list and return the
 * new entry count.  The Py_INCREF keeps the evsel object alive while the
 * evlist references the embedded struct evsel.
 */
static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist,
				  PyObject *args,
				  PyObject *kwargs __maybe_unused)
{
	struct evlist *evlist = &pevlist->evlist;
	PyObject *pevsel;
	struct evsel *evsel;

	if (!PyArg_ParseTuple(args, "O", &pevsel))
		return NULL;

	Py_INCREF(pevsel);
	evsel = &((struct pyrf_evsel *)pevsel)->evsel;
	evsel->core.idx = evlist->core.nr_entries;
	evlist__add(evlist, evsel);

	return Py_BuildValue("i", evlist->core.nr_entries);
}
967 
968 static struct mmap *get_md(struct evlist *evlist, int cpu)
969 {
970 	int i;
971 
972 	for (i = 0; i < evlist->core.nr_mmaps; i++) {
973 		struct mmap *md = &evlist->mmap[i];
974 
975 		if (md->core.cpu.cpu == cpu)
976 			return md;
977 	}
978 
979 	return NULL;
980 }
981 
982 static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
983 					  PyObject *args, PyObject *kwargs)
984 {
985 	struct evlist *evlist = &pevlist->evlist;
986 	union perf_event *event;
987 	int sample_id_all = 1, cpu;
988 	static char *kwlist[] = { "cpu", "sample_id_all", NULL };
989 	struct mmap *md;
990 	int err;
991 
992 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist,
993 					 &cpu, &sample_id_all))
994 		return NULL;
995 
996 	md = get_md(evlist, cpu);
997 	if (!md)
998 		return NULL;
999 
1000 	if (perf_mmap__read_init(&md->core) < 0)
1001 		goto end;
1002 
1003 	event = perf_mmap__read_event(&md->core);
1004 	if (event != NULL) {
1005 		PyObject *pyevent = pyrf_event__new(event);
1006 		struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
1007 		struct evsel *evsel;
1008 
1009 		if (pyevent == NULL)
1010 			return PyErr_NoMemory();
1011 
1012 		evsel = evlist__event2evsel(evlist, event);
1013 		if (!evsel) {
1014 			Py_INCREF(Py_None);
1015 			return Py_None;
1016 		}
1017 
1018 		pevent->evsel = evsel;
1019 
1020 		err = evsel__parse_sample(evsel, event, &pevent->sample);
1021 
1022 		/* Consume the even only after we parsed it out. */
1023 		perf_mmap__consume(&md->core);
1024 
1025 		if (err)
1026 			return PyErr_Format(PyExc_OSError,
1027 					    "perf: can't parse sample, err=%d", err);
1028 		return pyevent;
1029 	}
1030 end:
1031 	Py_INCREF(Py_None);
1032 	return Py_None;
1033 }
1034 
1035 static PyObject *pyrf_evlist__open(struct pyrf_evlist *pevlist,
1036 				   PyObject *args, PyObject *kwargs)
1037 {
1038 	struct evlist *evlist = &pevlist->evlist;
1039 
1040 	if (evlist__open(evlist) < 0) {
1041 		PyErr_SetFromErrno(PyExc_OSError);
1042 		return NULL;
1043 	}
1044 
1045 	Py_INCREF(Py_None);
1046 	return Py_None;
1047 }
1048 
/* Methods exposed on perf.evlist instances. */
static PyMethodDef pyrf_evlist__methods[] = {
	{
		.ml_name  = "mmap",
		.ml_meth  = (PyCFunction)pyrf_evlist__mmap,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("mmap the file descriptor table.")
	},
	{
		.ml_name  = "open",
		.ml_meth  = (PyCFunction)pyrf_evlist__open,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("open the file descriptors.")
	},
	{
		.ml_name  = "poll",
		.ml_meth  = (PyCFunction)pyrf_evlist__poll,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("poll the file descriptor table.")
	},
	{
		.ml_name  = "get_pollfd",
		.ml_meth  = (PyCFunction)pyrf_evlist__get_pollfd,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("get the poll file descriptor table.")
	},
	{
		.ml_name  = "add",
		.ml_meth  = (PyCFunction)pyrf_evlist__add,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("adds an event selector to the list.")
	},
	{
		.ml_name  = "read_on_cpu",
		.ml_meth  = (PyCFunction)pyrf_evlist__read_on_cpu,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("reads an event.")
	},
	{ .ml_name = NULL, }	/* sentinel */
};
1088 
1089 static Py_ssize_t pyrf_evlist__length(PyObject *obj)
1090 {
1091 	struct pyrf_evlist *pevlist = (void *)obj;
1092 
1093 	return pevlist->evlist.core.nr_entries;
1094 }
1095 
1096 static PyObject *pyrf_evlist__item(PyObject *obj, Py_ssize_t i)
1097 {
1098 	struct pyrf_evlist *pevlist = (void *)obj;
1099 	struct evsel *pos;
1100 
1101 	if (i >= pevlist->evlist.core.nr_entries)
1102 		return NULL;
1103 
1104 	evlist__for_each_entry(&pevlist->evlist, pos) {
1105 		if (i-- == 0)
1106 			break;
1107 	}
1108 
1109 	return Py_BuildValue("O", container_of(pos, struct pyrf_evsel, evsel));
1110 }
1111 
/* Sequence protocol for perf.evlist: supports len(evlist) and evlist[i]. */
static PySequenceMethods pyrf_evlist__sequence_methods = {
	.sq_length = pyrf_evlist__length,
	.sq_item   = pyrf_evlist__item,
};

static char pyrf_evlist__doc[] = PyDoc_STR("perf event selector list object.");
1118 
/* Python type object backing perf.evlist. */
static PyTypeObject pyrf_evlist__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.evlist",
	.tp_basicsize	= sizeof(struct pyrf_evlist),
	.tp_dealloc	= (destructor)pyrf_evlist__delete,
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_as_sequence	= &pyrf_evlist__sequence_methods,
	.tp_doc		= pyrf_evlist__doc,
	.tp_methods	= pyrf_evlist__methods,
	.tp_init	= (initproc)pyrf_evlist__init,
};
1130 
/* Fill in tp_new and let the interpreter finalize the evlist type object. */
static int pyrf_evlist__setup_types(void)
{
	pyrf_evlist__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_evlist__type);
}
1136 
/* Expose PERF_##name to python as module attribute "name". */
#define PERF_CONST(name) { #name, PERF_##name }

/* Table of numeric constants installed into the perf module's dict. */
static struct {
	const char *name;
	int	    value;
} perf__constants[] = {
	/* perf_event_attr.type */
	PERF_CONST(TYPE_HARDWARE),
	PERF_CONST(TYPE_SOFTWARE),
	PERF_CONST(TYPE_TRACEPOINT),
	PERF_CONST(TYPE_HW_CACHE),
	PERF_CONST(TYPE_RAW),
	PERF_CONST(TYPE_BREAKPOINT),

	/* hardware event configs and cache-event encoding */
	PERF_CONST(COUNT_HW_CPU_CYCLES),
	PERF_CONST(COUNT_HW_INSTRUCTIONS),
	PERF_CONST(COUNT_HW_CACHE_REFERENCES),
	PERF_CONST(COUNT_HW_CACHE_MISSES),
	PERF_CONST(COUNT_HW_BRANCH_INSTRUCTIONS),
	PERF_CONST(COUNT_HW_BRANCH_MISSES),
	PERF_CONST(COUNT_HW_BUS_CYCLES),
	PERF_CONST(COUNT_HW_CACHE_L1D),
	PERF_CONST(COUNT_HW_CACHE_L1I),
	PERF_CONST(COUNT_HW_CACHE_LL),
	PERF_CONST(COUNT_HW_CACHE_DTLB),
	PERF_CONST(COUNT_HW_CACHE_ITLB),
	PERF_CONST(COUNT_HW_CACHE_BPU),
	PERF_CONST(COUNT_HW_CACHE_OP_READ),
	PERF_CONST(COUNT_HW_CACHE_OP_WRITE),
	PERF_CONST(COUNT_HW_CACHE_OP_PREFETCH),
	PERF_CONST(COUNT_HW_CACHE_RESULT_ACCESS),
	PERF_CONST(COUNT_HW_CACHE_RESULT_MISS),

	PERF_CONST(COUNT_HW_STALLED_CYCLES_FRONTEND),
	PERF_CONST(COUNT_HW_STALLED_CYCLES_BACKEND),

	/* software event configs */
	PERF_CONST(COUNT_SW_CPU_CLOCK),
	PERF_CONST(COUNT_SW_TASK_CLOCK),
	PERF_CONST(COUNT_SW_PAGE_FAULTS),
	PERF_CONST(COUNT_SW_CONTEXT_SWITCHES),
	PERF_CONST(COUNT_SW_CPU_MIGRATIONS),
	PERF_CONST(COUNT_SW_PAGE_FAULTS_MIN),
	PERF_CONST(COUNT_SW_PAGE_FAULTS_MAJ),
	PERF_CONST(COUNT_SW_ALIGNMENT_FAULTS),
	PERF_CONST(COUNT_SW_EMULATION_FAULTS),
	PERF_CONST(COUNT_SW_DUMMY),

	/* perf_event_attr.sample_type bits */
	PERF_CONST(SAMPLE_IP),
	PERF_CONST(SAMPLE_TID),
	PERF_CONST(SAMPLE_TIME),
	PERF_CONST(SAMPLE_ADDR),
	PERF_CONST(SAMPLE_READ),
	PERF_CONST(SAMPLE_CALLCHAIN),
	PERF_CONST(SAMPLE_ID),
	PERF_CONST(SAMPLE_CPU),
	PERF_CONST(SAMPLE_PERIOD),
	PERF_CONST(SAMPLE_STREAM_ID),
	PERF_CONST(SAMPLE_RAW),

	/* perf_event_attr.read_format bits */
	PERF_CONST(FORMAT_TOTAL_TIME_ENABLED),
	PERF_CONST(FORMAT_TOTAL_TIME_RUNNING),
	PERF_CONST(FORMAT_ID),
	PERF_CONST(FORMAT_GROUP),

	/* perf_event_header.type record types */
	PERF_CONST(RECORD_MMAP),
	PERF_CONST(RECORD_LOST),
	PERF_CONST(RECORD_COMM),
	PERF_CONST(RECORD_EXIT),
	PERF_CONST(RECORD_THROTTLE),
	PERF_CONST(RECORD_UNTHROTTLE),
	PERF_CONST(RECORD_FORK),
	PERF_CONST(RECORD_READ),
	PERF_CONST(RECORD_SAMPLE),
	PERF_CONST(RECORD_MMAP2),
	PERF_CONST(RECORD_AUX),
	PERF_CONST(RECORD_ITRACE_START),
	PERF_CONST(RECORD_LOST_SAMPLES),
	PERF_CONST(RECORD_SWITCH),
	PERF_CONST(RECORD_SWITCH_CPU_WIDE),

	PERF_CONST(RECORD_MISC_SWITCH_OUT),
	{ .name = NULL, },	/* sentinel */
};
1219 
1220 static PyObject *pyrf__tracepoint(struct pyrf_evsel *pevsel,
1221 				  PyObject *args, PyObject *kwargs)
1222 {
1223 #ifndef HAVE_LIBTRACEEVENT
1224 	return NULL;
1225 #else
1226 	struct tep_event *tp_format;
1227 	static char *kwlist[] = { "sys", "name", NULL };
1228 	char *sys  = NULL;
1229 	char *name = NULL;
1230 
1231 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ss", kwlist,
1232 					 &sys, &name))
1233 		return NULL;
1234 
1235 	tp_format = trace_event__tp_format(sys, name);
1236 	if (IS_ERR(tp_format))
1237 		return _PyLong_FromLong(-1);
1238 
1239 	return _PyLong_FromLong(tp_format->id);
1240 #endif // HAVE_LIBTRACEEVENT
1241 }
1242 
/* Module-level functions of the perf module. */
static PyMethodDef perf__methods[] = {
	{
		.ml_name  = "tracepoint",
		.ml_meth  = (PyCFunction) pyrf__tracepoint,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("Get tracepoint config.")
	},
	{ .ml_name = NULL, }	/* sentinel */
};
1252 
1253 #if PY_MAJOR_VERSION < 3
1254 PyMODINIT_FUNC initperf(void)
1255 #else
1256 PyMODINIT_FUNC PyInit_perf(void)
1257 #endif
1258 {
1259 	PyObject *obj;
1260 	int i;
1261 	PyObject *dict;
1262 #if PY_MAJOR_VERSION < 3
1263 	PyObject *module = Py_InitModule("perf", perf__methods);
1264 #else
1265 	static struct PyModuleDef moduledef = {
1266 		PyModuleDef_HEAD_INIT,
1267 		"perf",			/* m_name */
1268 		"",			/* m_doc */
1269 		-1,			/* m_size */
1270 		perf__methods,		/* m_methods */
1271 		NULL,			/* m_reload */
1272 		NULL,			/* m_traverse */
1273 		NULL,			/* m_clear */
1274 		NULL,			/* m_free */
1275 	};
1276 	PyObject *module = PyModule_Create(&moduledef);
1277 #endif
1278 
1279 	if (module == NULL ||
1280 	    pyrf_event__setup_types() < 0 ||
1281 	    pyrf_evlist__setup_types() < 0 ||
1282 	    pyrf_evsel__setup_types() < 0 ||
1283 	    pyrf_thread_map__setup_types() < 0 ||
1284 	    pyrf_cpu_map__setup_types() < 0)
1285 #if PY_MAJOR_VERSION < 3
1286 		return;
1287 #else
1288 		return module;
1289 #endif
1290 
1291 	/* The page_size is placed in util object. */
1292 	page_size = sysconf(_SC_PAGE_SIZE);
1293 
1294 	Py_INCREF(&pyrf_evlist__type);
1295 	PyModule_AddObject(module, "evlist", (PyObject*)&pyrf_evlist__type);
1296 
1297 	Py_INCREF(&pyrf_evsel__type);
1298 	PyModule_AddObject(module, "evsel", (PyObject*)&pyrf_evsel__type);
1299 
1300 	Py_INCREF(&pyrf_mmap_event__type);
1301 	PyModule_AddObject(module, "mmap_event", (PyObject *)&pyrf_mmap_event__type);
1302 
1303 	Py_INCREF(&pyrf_lost_event__type);
1304 	PyModule_AddObject(module, "lost_event", (PyObject *)&pyrf_lost_event__type);
1305 
1306 	Py_INCREF(&pyrf_comm_event__type);
1307 	PyModule_AddObject(module, "comm_event", (PyObject *)&pyrf_comm_event__type);
1308 
1309 	Py_INCREF(&pyrf_task_event__type);
1310 	PyModule_AddObject(module, "task_event", (PyObject *)&pyrf_task_event__type);
1311 
1312 	Py_INCREF(&pyrf_throttle_event__type);
1313 	PyModule_AddObject(module, "throttle_event", (PyObject *)&pyrf_throttle_event__type);
1314 
1315 	Py_INCREF(&pyrf_task_event__type);
1316 	PyModule_AddObject(module, "task_event", (PyObject *)&pyrf_task_event__type);
1317 
1318 	Py_INCREF(&pyrf_read_event__type);
1319 	PyModule_AddObject(module, "read_event", (PyObject *)&pyrf_read_event__type);
1320 
1321 	Py_INCREF(&pyrf_sample_event__type);
1322 	PyModule_AddObject(module, "sample_event", (PyObject *)&pyrf_sample_event__type);
1323 
1324 	Py_INCREF(&pyrf_context_switch_event__type);
1325 	PyModule_AddObject(module, "switch_event", (PyObject *)&pyrf_context_switch_event__type);
1326 
1327 	Py_INCREF(&pyrf_thread_map__type);
1328 	PyModule_AddObject(module, "thread_map", (PyObject*)&pyrf_thread_map__type);
1329 
1330 	Py_INCREF(&pyrf_cpu_map__type);
1331 	PyModule_AddObject(module, "cpu_map", (PyObject*)&pyrf_cpu_map__type);
1332 
1333 	dict = PyModule_GetDict(module);
1334 	if (dict == NULL)
1335 		goto error;
1336 
1337 	for (i = 0; perf__constants[i].name != NULL; i++) {
1338 		obj = _PyLong_FromLong(perf__constants[i].value);
1339 		if (obj == NULL)
1340 			goto error;
1341 		PyDict_SetItemString(dict, perf__constants[i].name, obj);
1342 		Py_DECREF(obj);
1343 	}
1344 
1345 error:
1346 	if (PyErr_Occurred())
1347 		PyErr_SetString(PyExc_ImportError, "perf: Init failed!");
1348 #if PY_MAJOR_VERSION >= 3
1349 	return module;
1350 #endif
1351 }
1352 
1353 
/* The following are stubs to avoid dragging in builtin-* objects. */
/* TODO: move the code out of the builtin-* file into util. */

/* Callchain depth limit referenced by the scripting code; default here. */
unsigned int scripting_max_stack = PERF_MAX_STACK_DEPTH;
1358 
#ifdef HAVE_KVM_STAT_SUPPORT
/*
 * kvm-stat stubs: the python binding does not process KVM events, so these
 * classify nothing as a KVM entry/exit and decode nothing.
 */
bool kvm_entry_event(struct evsel *evsel __maybe_unused)
{
	return false;
}

bool kvm_exit_event(struct evsel *evsel __maybe_unused)
{
	return false;
}

bool exit_event_begin(struct evsel *evsel __maybe_unused,
		      struct perf_sample *sample  __maybe_unused,
		      struct event_key *key  __maybe_unused)
{
	return false;
}

bool exit_event_end(struct evsel *evsel __maybe_unused,
		    struct perf_sample *sample __maybe_unused,
		    struct event_key *key __maybe_unused)
{
	return false;
}

void exit_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
			   struct event_key *key __maybe_unused,
			   char *decode __maybe_unused)
{
}
#endif // HAVE_KVM_STAT_SUPPORT
1390 
/* Stub: script discovery lives in builtin-script; always reports failure. */
int find_scripts(char **scripts_array  __maybe_unused, char **scripts_path_array  __maybe_unused,
		int num  __maybe_unused, int pathlen __maybe_unused)
{
	return -1;
}
1396 
/* Stub: perf-stat output options are irrelevant to the python binding. */
void perf_stat__set_no_csv_summary(int set __maybe_unused)
{
}

/* Stub: see above. */
void perf_stat__set_big_num(int set __maybe_unused)
{
}
1404 
/* Stub: no scripting language backends are registered here; always fails. */
int script_spec_register(const char *spec __maybe_unused, struct scripting_ops *ops __maybe_unused)
{
	return -1;
}
1409 
/* Stub: no per-arch errno-name resolver is available in the binding. */
arch_syscalls__strerrno_t *arch_syscalls__strerrno_function(const char *arch __maybe_unused)
{
	return NULL;
}
1414 
/* Stub: kwork tracking is builtin-kwork functionality; nothing is added. */
struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork __maybe_unused,
				       struct kwork_class *class __maybe_unused,
				       struct kwork_work *key  __maybe_unused)
{
	return NULL;
}
1421 
/* Stub: instruction fetching for scripts is a no-op here. */
void script_fetch_insn(struct perf_sample *sample __maybe_unused,
		struct thread *thread __maybe_unused,
		struct machine *machine __maybe_unused)
{
}
1427 
/* Stub: sample-flag formatting is unsupported; always reports failure. */
int perf_sample__sprintf_flags(u32 flags __maybe_unused, char *str __maybe_unused,
			size_t sz __maybe_unused)
{
	return -1;
}
1433 
/*
 * lock-contention stubs: the python binding does not do lock analysis, so
 * callstack filters match nothing and lock statistics cannot be looked up
 * or created.
 */
bool match_callstack_filter(struct machine *machine __maybe_unused, u64 *callstack __maybe_unused)
{
	return false;
}

struct lock_stat *lock_stat_find(u64 addr __maybe_unused)
{
	return NULL;
}

struct lock_stat *lock_stat_findnew(u64 addr __maybe_unused, const char *name __maybe_unused,
				int flags __maybe_unused)
{
	return NULL;
}
1449 
/* Stub: builtin-inject is not linked into the binding; always fails. */
int cmd_inject(int argc __maybe_unused, const char *argv[] __maybe_unused)
{
	return -1;
}
1454