xref: /linux/tools/perf/util/python.c (revision 4b132aacb0768ac1e652cf517097ea6f237214b9)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <Python.h>
3 #include <structmember.h>
4 #include <inttypes.h>
5 #include <poll.h>
6 #include <linux/err.h>
7 #include <perf/cpumap.h>
8 #ifdef HAVE_LIBTRACEEVENT
9 #include <traceevent/event-parse.h>
10 #endif
11 #include <perf/mmap.h>
12 #include "evlist.h"
13 #include "evsel.h"
14 #include "event.h"
15 #include "print_binary.h"
16 #include "thread_map.h"
17 #include "trace-event.h"
18 #include "mmap.h"
19 #include "util/bpf-filter.h"
20 #include "util/env.h"
21 #include "util/kvm-stat.h"
22 #include "util/kwork.h"
23 #include "util/lock-contention.h"
24 #include <internal/lib.h>
25 #include "../builtin.h"
26 
27 #if PY_MAJOR_VERSION < 3
28 #define _PyUnicode_FromString(arg) \
29   PyString_FromString(arg)
30 #define _PyUnicode_AsString(arg) \
31   PyString_AsString(arg)
32 #define _PyUnicode_FromFormat(...) \
33   PyString_FromFormat(__VA_ARGS__)
34 #define _PyLong_FromLong(arg) \
35   PyInt_FromLong(arg)
36 
37 #else
38 
39 #define _PyUnicode_FromString(arg) \
40   PyUnicode_FromString(arg)
41 #define _PyUnicode_FromFormat(...) \
42   PyUnicode_FromFormat(__VA_ARGS__)
43 #define _PyLong_FromLong(arg) \
44   PyLong_FromLong(arg)
45 #endif
46 
47 #ifndef Py_TYPE
48 #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
49 #endif
50 
51 /* Define PyVarObject_HEAD_INIT for python 2.5 */
52 #ifndef PyVarObject_HEAD_INIT
53 # define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size,
54 #endif
55 
56 #if PY_MAJOR_VERSION < 3
57 PyMODINIT_FUNC initperf(void);
58 #else
59 PyMODINIT_FUNC PyInit_perf(void);
60 #endif
61 
62 #define member_def(type, member, ptype, help) \
63 	{ #member, ptype, \
64 	  offsetof(struct pyrf_event, event) + offsetof(struct type, member), \
65 	  0, help }
66 
67 #define sample_member_def(name, member, ptype, help) \
68 	{ #name, ptype, \
69 	  offsetof(struct pyrf_event, sample) + offsetof(struct perf_sample, member), \
70 	  0, help }
71 
72 struct pyrf_event {
73 	PyObject_HEAD
74 	struct evsel *evsel;
75 	struct perf_sample sample;
76 	union perf_event   event;
77 };
78 
79 #define sample_members \
80 	sample_member_def(sample_ip, ip, T_ULONGLONG, "event ip"),			 \
81 	sample_member_def(sample_pid, pid, T_INT, "event pid"),			 \
82 	sample_member_def(sample_tid, tid, T_INT, "event tid"),			 \
83 	sample_member_def(sample_time, time, T_ULONGLONG, "event timestamp"),		 \
84 	sample_member_def(sample_addr, addr, T_ULONGLONG, "event addr"),		 \
85 	sample_member_def(sample_id, id, T_ULONGLONG, "event id"),			 \
86 	sample_member_def(sample_stream_id, stream_id, T_ULONGLONG, "event stream id"), \
87 	sample_member_def(sample_period, period, T_ULONGLONG, "event period"),		 \
88 	sample_member_def(sample_cpu, cpu, T_UINT, "event cpu"),
89 
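/*
 * member_def()/sample_member_def() only build PyMemberDef entries, so each
 * event class defined below exposes the listed struct fields as read-only
 * Python attributes, in addition to the common sample_* members.  A rough,
 * illustrative read from Python (attribute names follow these tables):
 *
 *   # ev is e.g. a perf.comm_event returned by evlist.read_on_cpu()
 *   print(ev.pid, ev.tid, ev.comm, ev.sample_time)
 */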
90 static char pyrf_mmap_event__doc[] = PyDoc_STR("perf mmap event object.");
91 
92 static PyMemberDef pyrf_mmap_event__members[] = {
93 	sample_members
94 	member_def(perf_event_header, type, T_UINT, "event type"),
95 	member_def(perf_event_header, misc, T_UINT, "event misc"),
96 	member_def(perf_record_mmap, pid, T_UINT, "event pid"),
97 	member_def(perf_record_mmap, tid, T_UINT, "event tid"),
98 	member_def(perf_record_mmap, start, T_ULONGLONG, "start of the map"),
99 	member_def(perf_record_mmap, len, T_ULONGLONG, "map length"),
100 	member_def(perf_record_mmap, pgoff, T_ULONGLONG, "page offset"),
101 	member_def(perf_record_mmap, filename, T_STRING_INPLACE, "backing store"),
102 	{ .name = NULL, },
103 };
104 
105 static PyObject *pyrf_mmap_event__repr(struct pyrf_event *pevent)
106 {
107 	PyObject *ret;
108 	char *s;
109 
110 	if (asprintf(&s, "{ type: mmap, pid: %u, tid: %u, start: %#" PRI_lx64 ", "
111 			 "length: %#" PRI_lx64 ", offset: %#" PRI_lx64 ", "
112 			 "filename: %s }",
113 		     pevent->event.mmap.pid, pevent->event.mmap.tid,
114 		     pevent->event.mmap.start, pevent->event.mmap.len,
115 		     pevent->event.mmap.pgoff, pevent->event.mmap.filename) < 0) {
116 		ret = PyErr_NoMemory();
117 	} else {
118 		ret = _PyUnicode_FromString(s);
119 		free(s);
120 	}
121 	return ret;
122 }
123 
124 static PyTypeObject pyrf_mmap_event__type = {
125 	PyVarObject_HEAD_INIT(NULL, 0)
126 	.tp_name	= "perf.mmap_event",
127 	.tp_basicsize	= sizeof(struct pyrf_event),
128 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
129 	.tp_doc		= pyrf_mmap_event__doc,
130 	.tp_members	= pyrf_mmap_event__members,
131 	.tp_repr	= (reprfunc)pyrf_mmap_event__repr,
132 };
133 
134 static char pyrf_task_event__doc[] = PyDoc_STR("perf task (fork/exit) event object.");
135 
136 static PyMemberDef pyrf_task_event__members[] = {
137 	sample_members
138 	member_def(perf_event_header, type, T_UINT, "event type"),
139 	member_def(perf_record_fork, pid, T_UINT, "event pid"),
140 	member_def(perf_record_fork, ppid, T_UINT, "event ppid"),
141 	member_def(perf_record_fork, tid, T_UINT, "event tid"),
142 	member_def(perf_record_fork, ptid, T_UINT, "event ptid"),
143 	member_def(perf_record_fork, time, T_ULONGLONG, "timestamp"),
144 	{ .name = NULL, },
145 };
146 
147 static PyObject *pyrf_task_event__repr(struct pyrf_event *pevent)
148 {
149 	return _PyUnicode_FromFormat("{ type: %s, pid: %u, ppid: %u, tid: %u, "
150 				   "ptid: %u, time: %" PRI_lu64 "}",
151 				   pevent->event.header.type == PERF_RECORD_FORK ? "fork" : "exit",
152 				   pevent->event.fork.pid,
153 				   pevent->event.fork.ppid,
154 				   pevent->event.fork.tid,
155 				   pevent->event.fork.ptid,
156 				   pevent->event.fork.time);
157 }
158 
159 static PyTypeObject pyrf_task_event__type = {
160 	PyVarObject_HEAD_INIT(NULL, 0)
161 	.tp_name	= "perf.task_event",
162 	.tp_basicsize	= sizeof(struct pyrf_event),
163 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
164 	.tp_doc		= pyrf_task_event__doc,
165 	.tp_members	= pyrf_task_event__members,
166 	.tp_repr	= (reprfunc)pyrf_task_event__repr,
167 };
168 
169 static char pyrf_comm_event__doc[] = PyDoc_STR("perf comm event object.");
170 
171 static PyMemberDef pyrf_comm_event__members[] = {
172 	sample_members
173 	member_def(perf_event_header, type, T_UINT, "event type"),
174 	member_def(perf_record_comm, pid, T_UINT, "event pid"),
175 	member_def(perf_record_comm, tid, T_UINT, "event tid"),
176 	member_def(perf_record_comm, comm, T_STRING_INPLACE, "process name"),
177 	{ .name = NULL, },
178 };
179 
180 static PyObject *pyrf_comm_event__repr(struct pyrf_event *pevent)
181 {
182 	return _PyUnicode_FromFormat("{ type: comm, pid: %u, tid: %u, comm: %s }",
183 				   pevent->event.comm.pid,
184 				   pevent->event.comm.tid,
185 				   pevent->event.comm.comm);
186 }
187 
188 static PyTypeObject pyrf_comm_event__type = {
189 	PyVarObject_HEAD_INIT(NULL, 0)
190 	.tp_name	= "perf.comm_event",
191 	.tp_basicsize	= sizeof(struct pyrf_event),
192 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
193 	.tp_doc		= pyrf_comm_event__doc,
194 	.tp_members	= pyrf_comm_event__members,
195 	.tp_repr	= (reprfunc)pyrf_comm_event__repr,
196 };
197 
198 static char pyrf_throttle_event__doc[] = PyDoc_STR("perf throttle event object.");
199 
200 static PyMemberDef pyrf_throttle_event__members[] = {
201 	sample_members
202 	member_def(perf_event_header, type, T_UINT, "event type"),
203 	member_def(perf_record_throttle, time, T_ULONGLONG, "timestamp"),
204 	member_def(perf_record_throttle, id, T_ULONGLONG, "event id"),
205 	member_def(perf_record_throttle, stream_id, T_ULONGLONG, "event stream id"),
206 	{ .name = NULL, },
207 };
208 
209 static PyObject *pyrf_throttle_event__repr(struct pyrf_event *pevent)
210 {
211 	struct perf_record_throttle *te = (struct perf_record_throttle *)(&pevent->event.header + 1);
212 
213 	return _PyUnicode_FromFormat("{ type: %sthrottle, time: %" PRI_lu64 ", id: %" PRI_lu64
214 				   ", stream_id: %" PRI_lu64 " }",
215 				   pevent->event.header.type == PERF_RECORD_THROTTLE ? "" : "un",
216 				   te->time, te->id, te->stream_id);
217 }
218 
219 static PyTypeObject pyrf_throttle_event__type = {
220 	PyVarObject_HEAD_INIT(NULL, 0)
221 	.tp_name	= "perf.throttle_event",
222 	.tp_basicsize	= sizeof(struct pyrf_event),
223 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
224 	.tp_doc		= pyrf_throttle_event__doc,
225 	.tp_members	= pyrf_throttle_event__members,
226 	.tp_repr	= (reprfunc)pyrf_throttle_event__repr,
227 };
228 
229 static char pyrf_lost_event__doc[] = PyDoc_STR("perf lost event object.");
230 
231 static PyMemberDef pyrf_lost_event__members[] = {
232 	sample_members
233 	member_def(perf_record_lost, id, T_ULONGLONG, "event id"),
234 	member_def(perf_record_lost, lost, T_ULONGLONG, "number of lost events"),
235 	{ .name = NULL, },
236 };
237 
238 static PyObject *pyrf_lost_event__repr(struct pyrf_event *pevent)
239 {
240 	PyObject *ret;
241 	char *s;
242 
243 	if (asprintf(&s, "{ type: lost, id: %#" PRI_lx64 ", "
244 			 "lost: %#" PRI_lx64 " }",
245 		     pevent->event.lost.id, pevent->event.lost.lost) < 0) {
246 		ret = PyErr_NoMemory();
247 	} else {
248 		ret = _PyUnicode_FromString(s);
249 		free(s);
250 	}
251 	return ret;
252 }
253 
254 static PyTypeObject pyrf_lost_event__type = {
255 	PyVarObject_HEAD_INIT(NULL, 0)
256 	.tp_name	= "perf.lost_event",
257 	.tp_basicsize	= sizeof(struct pyrf_event),
258 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
259 	.tp_doc		= pyrf_lost_event__doc,
260 	.tp_members	= pyrf_lost_event__members,
261 	.tp_repr	= (reprfunc)pyrf_lost_event__repr,
262 };
263 
264 static char pyrf_read_event__doc[] = PyDoc_STR("perf read event object.");
265 
266 static PyMemberDef pyrf_read_event__members[] = {
267 	sample_members
268 	member_def(perf_record_read, pid, T_UINT, "event pid"),
269 	member_def(perf_record_read, tid, T_UINT, "event tid"),
270 	{ .name = NULL, },
271 };
272 
273 static PyObject *pyrf_read_event__repr(struct pyrf_event *pevent)
274 {
275 	return _PyUnicode_FromFormat("{ type: read, pid: %u, tid: %u }",
276 				   pevent->event.read.pid,
277 				   pevent->event.read.tid);
278 	/*
279 	 * FIXME: return the array of read values,
280 	 * making this method useful ;-)
281 	 */
282 }
283 
284 static PyTypeObject pyrf_read_event__type = {
285 	PyVarObject_HEAD_INIT(NULL, 0)
286 	.tp_name	= "perf.read_event",
287 	.tp_basicsize	= sizeof(struct pyrf_event),
288 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
289 	.tp_doc		= pyrf_read_event__doc,
290 	.tp_members	= pyrf_read_event__members,
291 	.tp_repr	= (reprfunc)pyrf_read_event__repr,
292 };
293 
294 static char pyrf_sample_event__doc[] = PyDoc_STR("perf sample event object.");
295 
296 static PyMemberDef pyrf_sample_event__members[] = {
297 	sample_members
298 	member_def(perf_event_header, type, T_UINT, "event type"),
299 	{ .name = NULL, },
300 };
301 
302 static PyObject *pyrf_sample_event__repr(struct pyrf_event *pevent)
303 {
304 	PyObject *ret;
305 	char *s;
306 
307 	if (asprintf(&s, "{ type: sample }") < 0) {
308 		ret = PyErr_NoMemory();
309 	} else {
310 		ret = _PyUnicode_FromString(s);
311 		free(s);
312 	}
313 	return ret;
314 }
315 
316 #ifdef HAVE_LIBTRACEEVENT
317 static bool is_tracepoint(struct pyrf_event *pevent)
318 {
319 	return pevent->evsel->core.attr.type == PERF_TYPE_TRACEPOINT;
320 }
321 
322 static PyObject*
323 tracepoint_field(struct pyrf_event *pe, struct tep_format_field *field)
324 {
325 	struct tep_handle *pevent = field->event->tep;
326 	void *data = pe->sample.raw_data;
327 	PyObject *ret = NULL;
328 	unsigned long long val;
329 	unsigned int offset, len;
330 
331 	if (field->flags & TEP_FIELD_IS_ARRAY) {
332 		offset = field->offset;
333 		len    = field->size;
334 		if (field->flags & TEP_FIELD_IS_DYNAMIC) {
335 			val     = tep_read_number(pevent, data + offset, len);
336 			offset  = val;
337 			len     = offset >> 16;
338 			offset &= 0xffff;
339 			if (tep_field_is_relative(field->flags))
340 				offset += field->offset + field->size;
341 		}
342 		if (field->flags & TEP_FIELD_IS_STRING &&
343 		    is_printable_array(data + offset, len)) {
344 			ret = _PyUnicode_FromString((char *)data + offset);
345 		} else {
346 			ret = PyByteArray_FromStringAndSize((const char *) data + offset, len);
347 			field->flags &= ~TEP_FIELD_IS_STRING;
348 		}
349 	} else {
350 		val = tep_read_number(pevent, data + field->offset,
351 				      field->size);
352 		if (field->flags & TEP_FIELD_IS_POINTER)
353 			ret = PyLong_FromUnsignedLong((unsigned long) val);
354 		else if (field->flags & TEP_FIELD_IS_SIGNED)
355 			ret = PyLong_FromLong((long) val);
356 		else
357 			ret = PyLong_FromUnsignedLong((unsigned long) val);
358 	}
359 
360 	return ret;
361 }
362 
363 static PyObject*
364 get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name)
365 {
366 	const char *str = _PyUnicode_AsString(PyObject_Str(attr_name));
367 	struct evsel *evsel = pevent->evsel;
368 	struct tep_format_field *field;
369 
370 	if (!evsel->tp_format) {
371 		struct tep_event *tp_format;
372 
373 		tp_format = trace_event__tp_format_id(evsel->core.attr.config);
374 		if (IS_ERR_OR_NULL(tp_format))
375 			return NULL;
376 
377 		evsel->tp_format = tp_format;
378 	}
379 
380 	field = tep_find_any_field(evsel->tp_format, str);
381 	if (!field)
382 		return NULL;
383 
384 	return tracepoint_field(pevent, field);
385 }
386 #endif /* HAVE_LIBTRACEEVENT */
387 
388 static PyObject*
389 pyrf_sample_event__getattro(struct pyrf_event *pevent, PyObject *attr_name)
390 {
391 	PyObject *obj = NULL;
392 
393 #ifdef HAVE_LIBTRACEEVENT
394 	if (is_tracepoint(pevent))
395 		obj = get_tracepoint_field(pevent, attr_name);
396 #endif
397 
398 	return obj ?: PyObject_GenericGetAttr((PyObject *) pevent, attr_name);
399 }
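/*
 * With the getattro hook above, fields of a tracepoint sample resolve
 * through libtraceevent, so a sample for e.g. sched:sched_switch can be
 * inspected by field name.  Sketch only, assuming such an evsel was opened
 * and mmapped (tools/perf/python/tracepoint.py does something similar):
 *
 *   event = evlist.read_on_cpu(cpu)
 *   if isinstance(event, perf.sample_event):
 *       print("%s/%d -> %s/%d" % (event.prev_comm, event.prev_pid,
 *                                 event.next_comm, event.next_pid))
 */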
400 
401 static PyTypeObject pyrf_sample_event__type = {
402 	PyVarObject_HEAD_INIT(NULL, 0)
403 	.tp_name	= "perf.sample_event",
404 	.tp_basicsize	= sizeof(struct pyrf_event),
405 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
406 	.tp_doc		= pyrf_sample_event__doc,
407 	.tp_members	= pyrf_sample_event__members,
408 	.tp_repr	= (reprfunc)pyrf_sample_event__repr,
409 	.tp_getattro	= (getattrofunc) pyrf_sample_event__getattro,
410 };
411 
412 static char pyrf_context_switch_event__doc[] = PyDoc_STR("perf context_switch event object.");
413 
414 static PyMemberDef pyrf_context_switch_event__members[] = {
415 	sample_members
416 	member_def(perf_event_header, type, T_UINT, "event type"),
417 	member_def(perf_record_switch, next_prev_pid, T_UINT, "next/prev pid"),
418 	member_def(perf_record_switch, next_prev_tid, T_UINT, "next/prev tid"),
419 	{ .name = NULL, },
420 };
421 
422 static PyObject *pyrf_context_switch_event__repr(struct pyrf_event *pevent)
423 {
424 	PyObject *ret;
425 	char *s;
426 
427 	if (asprintf(&s, "{ type: context_switch, next_prev_pid: %u, next_prev_tid: %u, switch_out: %u }",
428 		     pevent->event.context_switch.next_prev_pid,
429 		     pevent->event.context_switch.next_prev_tid,
430 		     !!(pevent->event.header.misc & PERF_RECORD_MISC_SWITCH_OUT)) < 0) {
431 		ret = PyErr_NoMemory();
432 	} else {
433 		ret = _PyUnicode_FromString(s);
434 		free(s);
435 	}
436 	return ret;
437 }
438 
439 static PyTypeObject pyrf_context_switch_event__type = {
440 	PyVarObject_HEAD_INIT(NULL, 0)
441 	.tp_name	= "perf.context_switch_event",
442 	.tp_basicsize	= sizeof(struct pyrf_event),
443 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
444 	.tp_doc		= pyrf_context_switch_event__doc,
445 	.tp_members	= pyrf_context_switch_event__members,
446 	.tp_repr	= (reprfunc)pyrf_context_switch_event__repr,
447 };
448 
449 static int pyrf_event__setup_types(void)
450 {
451 	int err;
452 	pyrf_mmap_event__type.tp_new =
453 	pyrf_task_event__type.tp_new =
454 	pyrf_comm_event__type.tp_new =
455 	pyrf_lost_event__type.tp_new =
456 	pyrf_read_event__type.tp_new =
457 	pyrf_sample_event__type.tp_new =
458 	pyrf_context_switch_event__type.tp_new =
459 	pyrf_throttle_event__type.tp_new = PyType_GenericNew;
460 	err = PyType_Ready(&pyrf_mmap_event__type);
461 	if (err < 0)
462 		goto out;
463 	err = PyType_Ready(&pyrf_lost_event__type);
464 	if (err < 0)
465 		goto out;
466 	err = PyType_Ready(&pyrf_task_event__type);
467 	if (err < 0)
468 		goto out;
469 	err = PyType_Ready(&pyrf_comm_event__type);
470 	if (err < 0)
471 		goto out;
472 	err = PyType_Ready(&pyrf_throttle_event__type);
473 	if (err < 0)
474 		goto out;
475 	err = PyType_Ready(&pyrf_read_event__type);
476 	if (err < 0)
477 		goto out;
478 	err = PyType_Ready(&pyrf_sample_event__type);
479 	if (err < 0)
480 		goto out;
481 	err = PyType_Ready(&pyrf_context_switch_event__type);
482 	if (err < 0)
483 		goto out;
484 out:
485 	return err;
486 }
487 
488 static PyTypeObject *pyrf_event__type[] = {
489 	[PERF_RECORD_MMAP]	 = &pyrf_mmap_event__type,
490 	[PERF_RECORD_LOST]	 = &pyrf_lost_event__type,
491 	[PERF_RECORD_COMM]	 = &pyrf_comm_event__type,
492 	[PERF_RECORD_EXIT]	 = &pyrf_task_event__type,
493 	[PERF_RECORD_THROTTLE]	 = &pyrf_throttle_event__type,
494 	[PERF_RECORD_UNTHROTTLE] = &pyrf_throttle_event__type,
495 	[PERF_RECORD_FORK]	 = &pyrf_task_event__type,
496 	[PERF_RECORD_READ]	 = &pyrf_read_event__type,
497 	[PERF_RECORD_SAMPLE]	 = &pyrf_sample_event__type,
498 	[PERF_RECORD_SWITCH]	 = &pyrf_context_switch_event__type,
499 	[PERF_RECORD_SWITCH_CPU_WIDE]  = &pyrf_context_switch_event__type,
500 };
501 
502 static PyObject *pyrf_event__new(union perf_event *event)
503 {
504 	struct pyrf_event *pevent;
505 	PyTypeObject *ptype;
506 
507 	if ((event->header.type < PERF_RECORD_MMAP ||
508 	     event->header.type > PERF_RECORD_SAMPLE) &&
509 	    !(event->header.type == PERF_RECORD_SWITCH ||
510 	      event->header.type == PERF_RECORD_SWITCH_CPU_WIDE))
511 		return NULL;
512 
513 	ptype = pyrf_event__type[event->header.type];
514 	pevent = PyObject_New(struct pyrf_event, ptype);
515 	if (pevent != NULL)
516 		memcpy(&pevent->event, event, event->header.size);
517 	return (PyObject *)pevent;
518 }
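/*
 * pyrf_event__new() picks the Python class from the table above, so the
 * record type can be distinguished on the Python side with isinstance(),
 * e.g. (illustrative only):
 *
 *   if isinstance(event, perf.comm_event):
 *       print("comm:", event.comm)
 *   elif isinstance(event, perf.sample_event):
 *       pass
 *
 * Record types not present in the table make pyrf_event__new() return NULL.
 */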
519 
520 struct pyrf_cpu_map {
521 	PyObject_HEAD
522 
523 	struct perf_cpu_map *cpus;
524 };
525 
526 static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus,
527 			      PyObject *args, PyObject *kwargs)
528 {
529 	static char *kwlist[] = { "cpustr", NULL };
530 	char *cpustr = NULL;
531 
532 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|s",
533 					 kwlist, &cpustr))
534 		return -1;
535 
536 	pcpus->cpus = perf_cpu_map__new(cpustr);
537 	if (pcpus->cpus == NULL)
538 		return -1;
539 	return 0;
540 }
541 
542 static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus)
543 {
544 	perf_cpu_map__put(pcpus->cpus);
545 	Py_TYPE(pcpus)->tp_free((PyObject*)pcpus);
546 }
547 
548 static Py_ssize_t pyrf_cpu_map__length(PyObject *obj)
549 {
550 	struct pyrf_cpu_map *pcpus = (void *)obj;
551 
552 	return perf_cpu_map__nr(pcpus->cpus);
553 }
554 
555 static PyObject *pyrf_cpu_map__item(PyObject *obj, Py_ssize_t i)
556 {
557 	struct pyrf_cpu_map *pcpus = (void *)obj;
558 
559 	if (i >= perf_cpu_map__nr(pcpus->cpus))
560 		return NULL;
561 
562 	return Py_BuildValue("i", perf_cpu_map__cpu(pcpus->cpus, i).cpu);
563 }
564 
565 static PySequenceMethods pyrf_cpu_map__sequence_methods = {
566 	.sq_length = pyrf_cpu_map__length,
567 	.sq_item   = pyrf_cpu_map__item,
568 };
569 
570 static char pyrf_cpu_map__doc[] = PyDoc_STR("cpu map object.");
571 
572 static PyTypeObject pyrf_cpu_map__type = {
573 	PyVarObject_HEAD_INIT(NULL, 0)
574 	.tp_name	= "perf.cpu_map",
575 	.tp_basicsize	= sizeof(struct pyrf_cpu_map),
576 	.tp_dealloc	= (destructor)pyrf_cpu_map__delete,
577 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
578 	.tp_doc		= pyrf_cpu_map__doc,
579 	.tp_as_sequence	= &pyrf_cpu_map__sequence_methods,
580 	.tp_init	= (initproc)pyrf_cpu_map__init,
581 };
582 
583 static int pyrf_cpu_map__setup_types(void)
584 {
585 	pyrf_cpu_map__type.tp_new = PyType_GenericNew;
586 	return PyType_Ready(&pyrf_cpu_map__type);
587 }
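/*
 * perf.cpu_map behaves as a read-only sequence of CPU numbers.  Rough usage
 * sketch (the "0-3" string is just an example cpu list):
 *
 *   cpus = perf.cpu_map("0-3")   # or perf.cpu_map() for the default map,
 *                                # typically all online CPUs
 *   for cpu in cpus:
 *       print(cpu)
 */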
588 
589 struct pyrf_thread_map {
590 	PyObject_HEAD
591 
592 	struct perf_thread_map *threads;
593 };
594 
595 static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads,
596 				 PyObject *args, PyObject *kwargs)
597 {
598 	static char *kwlist[] = { "pid", "tid", "uid", NULL };
599 	int pid = -1, tid = -1, uid = UINT_MAX;
600 
601 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iii",
602 					 kwlist, &pid, &tid, &uid))
603 		return -1;
604 
605 	pthreads->threads = thread_map__new(pid, tid, uid);
606 	if (pthreads->threads == NULL)
607 		return -1;
608 	return 0;
609 }
610 
611 static void pyrf_thread_map__delete(struct pyrf_thread_map *pthreads)
612 {
613 	perf_thread_map__put(pthreads->threads);
614 	Py_TYPE(pthreads)->tp_free((PyObject*)pthreads);
615 }
616 
617 static Py_ssize_t pyrf_thread_map__length(PyObject *obj)
618 {
619 	struct pyrf_thread_map *pthreads = (void *)obj;
620 
621 	return perf_thread_map__nr(pthreads->threads);
622 }
623 
624 static PyObject *pyrf_thread_map__item(PyObject *obj, Py_ssize_t i)
625 {
626 	struct pyrf_thread_map *pthreads = (void *)obj;
627 
628 	if (i >= perf_thread_map__nr(pthreads->threads))
629 		return NULL;
630 
631 	return Py_BuildValue("i", perf_thread_map__pid(pthreads->threads, i));
632 }
633 
634 static PySequenceMethods pyrf_thread_map__sequence_methods = {
635 	.sq_length = pyrf_thread_map__length,
636 	.sq_item   = pyrf_thread_map__item,
637 };
638 
639 static char pyrf_thread_map__doc[] = PyDoc_STR("thread map object.");
640 
641 static PyTypeObject pyrf_thread_map__type = {
642 	PyVarObject_HEAD_INIT(NULL, 0)
643 	.tp_name	= "perf.thread_map",
644 	.tp_basicsize	= sizeof(struct pyrf_thread_map),
645 	.tp_dealloc	= (destructor)pyrf_thread_map__delete,
646 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
647 	.tp_doc		= pyrf_thread_map__doc,
648 	.tp_as_sequence	= &pyrf_thread_map__sequence_methods,
649 	.tp_init	= (initproc)pyrf_thread_map__init,
650 };
651 
652 static int pyrf_thread_map__setup_types(void)
653 {
654 	pyrf_thread_map__type.tp_new = PyType_GenericNew;
655 	return PyType_Ready(&pyrf_thread_map__type);
656 }
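/*
 * perf.thread_map is likewise a read-only sequence, yielding the mapped
 * pids.  Rough usage sketch (pid/tid/uid semantics follow thread_map__new()):
 *
 *   threads = perf.thread_map(pid=-1)   # -1: don't restrict to one process
 *   print(len(threads), threads[0])
 */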
657 
658 struct pyrf_evsel {
659 	PyObject_HEAD
660 
661 	struct evsel evsel;
662 };
663 
664 static int pyrf_evsel__init(struct pyrf_evsel *pevsel,
665 			    PyObject *args, PyObject *kwargs)
666 {
667 	struct perf_event_attr attr = {
668 		.type = PERF_TYPE_HARDWARE,
669 		.config = PERF_COUNT_HW_CPU_CYCLES,
670 		.sample_type = PERF_SAMPLE_PERIOD | PERF_SAMPLE_TID,
671 	};
672 	static char *kwlist[] = {
673 		"type",
674 		"config",
675 		"sample_freq",
676 		"sample_period",
677 		"sample_type",
678 		"read_format",
679 		"disabled",
680 		"inherit",
681 		"pinned",
682 		"exclusive",
683 		"exclude_user",
684 		"exclude_kernel",
685 		"exclude_hv",
686 		"exclude_idle",
687 		"mmap",
688 		"context_switch",
689 		"comm",
690 		"freq",
691 		"inherit_stat",
692 		"enable_on_exec",
693 		"task",
694 		"watermark",
695 		"precise_ip",
696 		"mmap_data",
697 		"sample_id_all",
698 		"wakeup_events",
699 		"bp_type",
700 		"bp_addr",
701 		"bp_len",
702 		 NULL
703 	};
704 	u64 sample_period = 0;
705 	u32 disabled = 0,
706 	    inherit = 0,
707 	    pinned = 0,
708 	    exclusive = 0,
709 	    exclude_user = 0,
710 	    exclude_kernel = 0,
711 	    exclude_hv = 0,
712 	    exclude_idle = 0,
713 	    mmap = 0,
714 	    context_switch = 0,
715 	    comm = 0,
716 	    freq = 1,
717 	    inherit_stat = 0,
718 	    enable_on_exec = 0,
719 	    task = 0,
720 	    watermark = 0,
721 	    precise_ip = 0,
722 	    mmap_data = 0,
723 	    sample_id_all = 1;
724 	int idx = 0;
725 
726 	if (!PyArg_ParseTupleAndKeywords(args, kwargs,
727 					 "|iKiKKiiiiiiiiiiiiiiiiiiiiiiKK", kwlist,
728 					 &attr.type, &attr.config, &attr.sample_freq,
729 					 &sample_period, &attr.sample_type,
730 					 &attr.read_format, &disabled, &inherit,
731 					 &pinned, &exclusive, &exclude_user,
732 					 &exclude_kernel, &exclude_hv, &exclude_idle,
733 					 &mmap, &context_switch, &comm, &freq, &inherit_stat,
734 					 &enable_on_exec, &task, &watermark,
735 					 &precise_ip, &mmap_data, &sample_id_all,
736 					 &attr.wakeup_events, &attr.bp_type,
737 					 &attr.bp_addr, &attr.bp_len, &idx))
738 		return -1;
739 
740 	/* sample_period and sample_freq share a union in perf_event_attr. */
741 	if (sample_period != 0) {
742 		if (attr.sample_freq != 0)
743 			return -1; /* FIXME: throw right exception */
744 		attr.sample_period = sample_period;
745 	}
746 
747 	/* Bitfields */
748 	attr.disabled	    = disabled;
749 	attr.inherit	    = inherit;
750 	attr.pinned	    = pinned;
751 	attr.exclusive	    = exclusive;
752 	attr.exclude_user   = exclude_user;
753 	attr.exclude_kernel = exclude_kernel;
754 	attr.exclude_hv	    = exclude_hv;
755 	attr.exclude_idle   = exclude_idle;
756 	attr.mmap	    = mmap;
757 	attr.context_switch = context_switch;
758 	attr.comm	    = comm;
759 	attr.freq	    = freq;
760 	attr.inherit_stat   = inherit_stat;
761 	attr.enable_on_exec = enable_on_exec;
762 	attr.task	    = task;
763 	attr.watermark	    = watermark;
764 	attr.precise_ip	    = precise_ip;
765 	attr.mmap_data	    = mmap_data;
766 	attr.sample_id_all  = sample_id_all;
767 	attr.size	    = sizeof(attr);
768 
769 	evsel__init(&pevsel->evsel, &attr, idx);
770 	return 0;
771 }
772 
773 static void pyrf_evsel__delete(struct pyrf_evsel *pevsel)
774 {
775 	evsel__exit(&pevsel->evsel);
776 	Py_TYPE(pevsel)->tp_free((PyObject*)pevsel);
777 }
778 
779 static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
780 				  PyObject *args, PyObject *kwargs)
781 {
782 	struct evsel *evsel = &pevsel->evsel;
783 	struct perf_cpu_map *cpus = NULL;
784 	struct perf_thread_map *threads = NULL;
785 	PyObject *pcpus = NULL, *pthreads = NULL;
786 	int group = 0, inherit = 0;
787 	static char *kwlist[] = { "cpus", "threads", "group", "inherit", NULL };
788 
789 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist,
790 					 &pcpus, &pthreads, &group, &inherit))
791 		return NULL;
792 
793 	if (pthreads != NULL)
794 		threads = ((struct pyrf_thread_map *)pthreads)->threads;
795 
796 	if (pcpus != NULL)
797 		cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
798 
799 	evsel->core.attr.inherit = inherit;
800 	/*
801 	 * This will group just the fds for this single evsel; to group
802 	 * multiple events, use evlist.open().
803 	 */
804 	if (evsel__open(evsel, cpus, threads) < 0) {
805 		PyErr_SetFromErrno(PyExc_OSError);
806 		return NULL;
807 	}
808 
809 	Py_INCREF(Py_None);
810 	return Py_None;
811 }
812 
813 static PyMethodDef pyrf_evsel__methods[] = {
814 	{
815 		.ml_name  = "open",
816 		.ml_meth  = (PyCFunction)pyrf_evsel__open,
817 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
818 		.ml_doc	  = PyDoc_STR("open the event selector file descriptor table.")
819 	},
820 	{ .ml_name = NULL, }
821 };
822 
823 static char pyrf_evsel__doc[] = PyDoc_STR("perf event selector object.");
824 
825 static PyTypeObject pyrf_evsel__type = {
826 	PyVarObject_HEAD_INIT(NULL, 0)
827 	.tp_name	= "perf.evsel",
828 	.tp_basicsize	= sizeof(struct pyrf_evsel),
829 	.tp_dealloc	= (destructor)pyrf_evsel__delete,
830 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
831 	.tp_doc		= pyrf_evsel__doc,
832 	.tp_methods	= pyrf_evsel__methods,
833 	.tp_init	= (initproc)pyrf_evsel__init,
834 };
835 
836 static int pyrf_evsel__setup_types(void)
837 {
838 	pyrf_evsel__type.tp_new = PyType_GenericNew;
839 	return PyType_Ready(&pyrf_evsel__type);
840 }
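/*
 * A rough sketch of constructing and opening a single event from Python;
 * keyword names match kwlist in pyrf_evsel__init() and the constants are
 * the PERF_CONST() values exported at module init:
 *
 *   evsel = perf.evsel(type = perf.TYPE_SOFTWARE,
 *                      config = perf.COUNT_SW_DUMMY,
 *                      task = 1, comm = 1, freq = 0,
 *                      wakeup_events = 1, sample_id_all = 1,
 *                      sample_type = perf.SAMPLE_TID | perf.SAMPLE_CPU)
 *   evsel.open(cpus = perf.cpu_map(), threads = perf.thread_map(-1))
 */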
841 
842 struct pyrf_evlist {
843 	PyObject_HEAD
844 
845 	struct evlist evlist;
846 };
847 
848 static int pyrf_evlist__init(struct pyrf_evlist *pevlist,
849 			     PyObject *args, PyObject *kwargs __maybe_unused)
850 {
851 	PyObject *pcpus = NULL, *pthreads = NULL;
852 	struct perf_cpu_map *cpus;
853 	struct perf_thread_map *threads;
854 
855 	if (!PyArg_ParseTuple(args, "OO", &pcpus, &pthreads))
856 		return -1;
857 
858 	threads = ((struct pyrf_thread_map *)pthreads)->threads;
859 	cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
860 	evlist__init(&pevlist->evlist, cpus, threads);
861 	return 0;
862 }
863 
864 static void pyrf_evlist__delete(struct pyrf_evlist *pevlist)
865 {
866 	evlist__exit(&pevlist->evlist);
867 	Py_TYPE(pevlist)->tp_free((PyObject*)pevlist);
868 }
869 
870 static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist,
871 				   PyObject *args, PyObject *kwargs)
872 {
873 	struct evlist *evlist = &pevlist->evlist;
874 	static char *kwlist[] = { "pages", "overwrite", NULL };
875 	int pages = 128, overwrite = false;
876 
877 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", kwlist,
878 					 &pages, &overwrite))
879 		return NULL;
880 
881 	if (evlist__mmap(evlist, pages) < 0) {
882 		PyErr_SetFromErrno(PyExc_OSError);
883 		return NULL;
884 	}
885 
886 	Py_INCREF(Py_None);
887 	return Py_None;
888 }
889 
890 static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist,
891 				   PyObject *args, PyObject *kwargs)
892 {
893 	struct evlist *evlist = &pevlist->evlist;
894 	static char *kwlist[] = { "timeout", NULL };
895 	int timeout = -1, n;
896 
897 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout))
898 		return NULL;
899 
900 	n = evlist__poll(evlist, timeout);
901 	if (n < 0) {
902 		PyErr_SetFromErrno(PyExc_OSError);
903 		return NULL;
904 	}
905 
906 	return Py_BuildValue("i", n);
907 }
908 
909 static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
910 					 PyObject *args __maybe_unused,
911 					 PyObject *kwargs __maybe_unused)
912 {
913 	struct evlist *evlist = &pevlist->evlist;
914 	PyObject *list = PyList_New(0);
915 	int i;
916 
917 	for (i = 0; i < evlist->core.pollfd.nr; ++i) {
918 		PyObject *file;
919 #if PY_MAJOR_VERSION < 3
920 		FILE *fp = fdopen(evlist->core.pollfd.entries[i].fd, "r");
921 
922 		if (fp == NULL)
923 			goto free_list;
924 
925 		file = PyFile_FromFile(fp, "perf", "r", NULL);
926 #else
927 		file = PyFile_FromFd(evlist->core.pollfd.entries[i].fd, "perf", "r", -1,
928 				     NULL, NULL, NULL, 0);
929 #endif
930 		if (file == NULL)
931 			goto free_list;
932 
933 		if (PyList_Append(list, file) != 0) {
934 			Py_DECREF(file);
935 			goto free_list;
936 		}
937 
938 		Py_DECREF(file);
939 	}
940 
941 	return list;
942 free_list:
943 	return PyErr_NoMemory();
944 }
945 
946 
947 static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist,
948 				  PyObject *args,
949 				  PyObject *kwargs __maybe_unused)
950 {
951 	struct evlist *evlist = &pevlist->evlist;
952 	PyObject *pevsel;
953 	struct evsel *evsel;
954 
955 	if (!PyArg_ParseTuple(args, "O", &pevsel))
956 		return NULL;
957 
958 	Py_INCREF(pevsel);
959 	evsel = &((struct pyrf_evsel *)pevsel)->evsel;
960 	evsel->core.idx = evlist->core.nr_entries;
961 	evlist__add(evlist, evsel);
962 
963 	return Py_BuildValue("i", evlist->core.nr_entries);
964 }
965 
966 static struct mmap *get_md(struct evlist *evlist, int cpu)
967 {
968 	int i;
969 
970 	for (i = 0; i < evlist->core.nr_mmaps; i++) {
971 		struct mmap *md = &evlist->mmap[i];
972 
973 		if (md->core.cpu.cpu == cpu)
974 			return md;
975 	}
976 
977 	return NULL;
978 }
979 
980 static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
981 					  PyObject *args, PyObject *kwargs)
982 {
983 	struct evlist *evlist = &pevlist->evlist;
984 	union perf_event *event;
985 	int sample_id_all = 1, cpu;
986 	static char *kwlist[] = { "cpu", "sample_id_all", NULL };
987 	struct mmap *md;
988 	int err;
989 
990 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist,
991 					 &cpu, &sample_id_all))
992 		return NULL;
993 
994 	md = get_md(evlist, cpu);
995 	if (!md)
996 		return NULL;
997 
998 	if (perf_mmap__read_init(&md->core) < 0)
999 		goto end;
1000 
1001 	event = perf_mmap__read_event(&md->core);
1002 	if (event != NULL) {
1003 		PyObject *pyevent = pyrf_event__new(event);
1004 		struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
1005 		struct evsel *evsel;
1006 
1007 		if (pyevent == NULL)
1008 			return PyErr_NoMemory();
1009 
1010 		evsel = evlist__event2evsel(evlist, event);
1011 		if (!evsel) {
1012 			Py_INCREF(Py_None);
1013 			return Py_None;
1014 		}
1015 
1016 		pevent->evsel = evsel;
1017 
1018 		err = evsel__parse_sample(evsel, event, &pevent->sample);
1019 
1020 		/* Consume the event only after we parsed it out. */
1021 		perf_mmap__consume(&md->core);
1022 
1023 		if (err)
1024 			return PyErr_Format(PyExc_OSError,
1025 					    "perf: can't parse sample, err=%d", err);
1026 		return pyevent;
1027 	}
1028 end:
1029 	Py_INCREF(Py_None);
1030 	return Py_None;
1031 }
1032 
1033 static PyObject *pyrf_evlist__open(struct pyrf_evlist *pevlist,
1034 				   PyObject *args, PyObject *kwargs)
1035 {
1036 	struct evlist *evlist = &pevlist->evlist;
1037 
1038 	if (evlist__open(evlist) < 0) {
1039 		PyErr_SetFromErrno(PyExc_OSError);
1040 		return NULL;
1041 	}
1042 
1043 	Py_INCREF(Py_None);
1044 	return Py_None;
1045 }
1046 
1047 static PyMethodDef pyrf_evlist__methods[] = {
1048 	{
1049 		.ml_name  = "mmap",
1050 		.ml_meth  = (PyCFunction)pyrf_evlist__mmap,
1051 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
1052 		.ml_doc	  = PyDoc_STR("mmap the file descriptor table.")
1053 	},
1054 	{
1055 		.ml_name  = "open",
1056 		.ml_meth  = (PyCFunction)pyrf_evlist__open,
1057 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
1058 		.ml_doc	  = PyDoc_STR("open the file descriptors.")
1059 	},
1060 	{
1061 		.ml_name  = "poll",
1062 		.ml_meth  = (PyCFunction)pyrf_evlist__poll,
1063 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
1064 		.ml_doc	  = PyDoc_STR("poll the file descriptor table.")
1065 	},
1066 	{
1067 		.ml_name  = "get_pollfd",
1068 		.ml_meth  = (PyCFunction)pyrf_evlist__get_pollfd,
1069 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
1070 		.ml_doc	  = PyDoc_STR("get the poll file descriptor table.")
1071 	},
1072 	{
1073 		.ml_name  = "add",
1074 		.ml_meth  = (PyCFunction)pyrf_evlist__add,
1075 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
1076 		.ml_doc	  = PyDoc_STR("adds an event selector to the list.")
1077 	},
1078 	{
1079 		.ml_name  = "read_on_cpu",
1080 		.ml_meth  = (PyCFunction)pyrf_evlist__read_on_cpu,
1081 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
1082 		.ml_doc	  = PyDoc_STR("reads an event.")
1083 	},
1084 	{ .ml_name = NULL, }
1085 };
1086 
1087 static Py_ssize_t pyrf_evlist__length(PyObject *obj)
1088 {
1089 	struct pyrf_evlist *pevlist = (void *)obj;
1090 
1091 	return pevlist->evlist.core.nr_entries;
1092 }
1093 
1094 static PyObject *pyrf_evlist__item(PyObject *obj, Py_ssize_t i)
1095 {
1096 	struct pyrf_evlist *pevlist = (void *)obj;
1097 	struct evsel *pos;
1098 
1099 	if (i >= pevlist->evlist.core.nr_entries)
1100 		return NULL;
1101 
1102 	evlist__for_each_entry(&pevlist->evlist, pos) {
1103 		if (i-- == 0)
1104 			break;
1105 	}
1106 
1107 	return Py_BuildValue("O", container_of(pos, struct pyrf_evsel, evsel));
1108 }
1109 
1110 static PySequenceMethods pyrf_evlist__sequence_methods = {
1111 	.sq_length = pyrf_evlist__length,
1112 	.sq_item   = pyrf_evlist__item,
1113 };
1114 
1115 static char pyrf_evlist__doc[] = PyDoc_STR("perf event selector list object.");
1116 
1117 static PyTypeObject pyrf_evlist__type = {
1118 	PyVarObject_HEAD_INIT(NULL, 0)
1119 	.tp_name	= "perf.evlist",
1120 	.tp_basicsize	= sizeof(struct pyrf_evlist),
1121 	.tp_dealloc	= (destructor)pyrf_evlist__delete,
1122 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
1123 	.tp_as_sequence	= &pyrf_evlist__sequence_methods,
1124 	.tp_doc		= pyrf_evlist__doc,
1125 	.tp_methods	= pyrf_evlist__methods,
1126 	.tp_init	= (initproc)pyrf_evlist__init,
1127 };
1128 
1129 static int pyrf_evlist__setup_types(void)
1130 {
1131 	pyrf_evlist__type.tp_new = PyType_GenericNew;
1132 	return PyType_Ready(&pyrf_evlist__type);
1133 }
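/*
 * Putting the pieces together, a minimal event loop from Python might look
 * like the sketch below (close in spirit to tools/perf/python/twatch.py);
 * the pages/timeout values are arbitrary examples:
 *
 *   cpus, threads = perf.cpu_map(), perf.thread_map(-1)
 *   evsel = perf.evsel(type = perf.TYPE_SOFTWARE,
 *                      config = perf.COUNT_SW_DUMMY,
 *                      task = 1, comm = 1, freq = 0, wakeup_events = 1,
 *                      sample_id_all = 1,
 *                      sample_type = perf.SAMPLE_TID | perf.SAMPLE_CPU)
 *   evsel.open(cpus = cpus, threads = threads)
 *   evlist = perf.evlist(cpus, threads)
 *   evlist.add(evsel)
 *   evlist.mmap(pages = 16)
 *   while True:
 *       evlist.poll(timeout = -1)
 *       for cpu in cpus:
 *           event = evlist.read_on_cpu(cpu)
 *           if event:
 *               print(event)
 */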
1134 
1135 #define PERF_CONST(name) { #name, PERF_##name }
1136 
1137 static struct {
1138 	const char *name;
1139 	int	    value;
1140 } perf__constants[] = {
1141 	PERF_CONST(TYPE_HARDWARE),
1142 	PERF_CONST(TYPE_SOFTWARE),
1143 	PERF_CONST(TYPE_TRACEPOINT),
1144 	PERF_CONST(TYPE_HW_CACHE),
1145 	PERF_CONST(TYPE_RAW),
1146 	PERF_CONST(TYPE_BREAKPOINT),
1147 
1148 	PERF_CONST(COUNT_HW_CPU_CYCLES),
1149 	PERF_CONST(COUNT_HW_INSTRUCTIONS),
1150 	PERF_CONST(COUNT_HW_CACHE_REFERENCES),
1151 	PERF_CONST(COUNT_HW_CACHE_MISSES),
1152 	PERF_CONST(COUNT_HW_BRANCH_INSTRUCTIONS),
1153 	PERF_CONST(COUNT_HW_BRANCH_MISSES),
1154 	PERF_CONST(COUNT_HW_BUS_CYCLES),
1155 	PERF_CONST(COUNT_HW_CACHE_L1D),
1156 	PERF_CONST(COUNT_HW_CACHE_L1I),
1157 	PERF_CONST(COUNT_HW_CACHE_LL),
1158 	PERF_CONST(COUNT_HW_CACHE_DTLB),
1159 	PERF_CONST(COUNT_HW_CACHE_ITLB),
1160 	PERF_CONST(COUNT_HW_CACHE_BPU),
1161 	PERF_CONST(COUNT_HW_CACHE_OP_READ),
1162 	PERF_CONST(COUNT_HW_CACHE_OP_WRITE),
1163 	PERF_CONST(COUNT_HW_CACHE_OP_PREFETCH),
1164 	PERF_CONST(COUNT_HW_CACHE_RESULT_ACCESS),
1165 	PERF_CONST(COUNT_HW_CACHE_RESULT_MISS),
1166 
1167 	PERF_CONST(COUNT_HW_STALLED_CYCLES_FRONTEND),
1168 	PERF_CONST(COUNT_HW_STALLED_CYCLES_BACKEND),
1169 
1170 	PERF_CONST(COUNT_SW_CPU_CLOCK),
1171 	PERF_CONST(COUNT_SW_TASK_CLOCK),
1172 	PERF_CONST(COUNT_SW_PAGE_FAULTS),
1173 	PERF_CONST(COUNT_SW_CONTEXT_SWITCHES),
1174 	PERF_CONST(COUNT_SW_CPU_MIGRATIONS),
1175 	PERF_CONST(COUNT_SW_PAGE_FAULTS_MIN),
1176 	PERF_CONST(COUNT_SW_PAGE_FAULTS_MAJ),
1177 	PERF_CONST(COUNT_SW_ALIGNMENT_FAULTS),
1178 	PERF_CONST(COUNT_SW_EMULATION_FAULTS),
1179 	PERF_CONST(COUNT_SW_DUMMY),
1180 
1181 	PERF_CONST(SAMPLE_IP),
1182 	PERF_CONST(SAMPLE_TID),
1183 	PERF_CONST(SAMPLE_TIME),
1184 	PERF_CONST(SAMPLE_ADDR),
1185 	PERF_CONST(SAMPLE_READ),
1186 	PERF_CONST(SAMPLE_CALLCHAIN),
1187 	PERF_CONST(SAMPLE_ID),
1188 	PERF_CONST(SAMPLE_CPU),
1189 	PERF_CONST(SAMPLE_PERIOD),
1190 	PERF_CONST(SAMPLE_STREAM_ID),
1191 	PERF_CONST(SAMPLE_RAW),
1192 
1193 	PERF_CONST(FORMAT_TOTAL_TIME_ENABLED),
1194 	PERF_CONST(FORMAT_TOTAL_TIME_RUNNING),
1195 	PERF_CONST(FORMAT_ID),
1196 	PERF_CONST(FORMAT_GROUP),
1197 
1198 	PERF_CONST(RECORD_MMAP),
1199 	PERF_CONST(RECORD_LOST),
1200 	PERF_CONST(RECORD_COMM),
1201 	PERF_CONST(RECORD_EXIT),
1202 	PERF_CONST(RECORD_THROTTLE),
1203 	PERF_CONST(RECORD_UNTHROTTLE),
1204 	PERF_CONST(RECORD_FORK),
1205 	PERF_CONST(RECORD_READ),
1206 	PERF_CONST(RECORD_SAMPLE),
1207 	PERF_CONST(RECORD_MMAP2),
1208 	PERF_CONST(RECORD_AUX),
1209 	PERF_CONST(RECORD_ITRACE_START),
1210 	PERF_CONST(RECORD_LOST_SAMPLES),
1211 	PERF_CONST(RECORD_SWITCH),
1212 	PERF_CONST(RECORD_SWITCH_CPU_WIDE),
1213 
1214 	PERF_CONST(RECORD_MISC_SWITCH_OUT),
1215 	{ .name = NULL, },
1216 };
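/*
 * Each entry above is exported by the module init code below as an integer
 * attribute of the module, so Python callers can write, for instance:
 *
 *   attr_type = perf.TYPE_HARDWARE
 *   mask = perf.SAMPLE_TID | perf.SAMPLE_CPU
 */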
1217 
1218 static PyObject *pyrf__tracepoint(struct pyrf_evsel *pevsel,
1219 				  PyObject *args, PyObject *kwargs)
1220 {
1221 #ifndef HAVE_LIBTRACEEVENT
1222 	return NULL;
1223 #else
1224 	struct tep_event *tp_format;
1225 	static char *kwlist[] = { "sys", "name", NULL };
1226 	char *sys  = NULL;
1227 	char *name = NULL;
1228 
1229 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ss", kwlist,
1230 					 &sys, &name))
1231 		return NULL;
1232 
1233 	tp_format = trace_event__tp_format(sys, name);
1234 	if (IS_ERR(tp_format))
1235 		return _PyLong_FromLong(-1);
1236 
1237 	return _PyLong_FromLong(tp_format->id);
1238 #endif // HAVE_LIBTRACEEVENT
1239 }
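/*
 * perf.tracepoint() resolves a tracepoint to its id so it can be used as
 * the config of a PERF_TYPE_TRACEPOINT evsel.  Illustrative sketch
 * (sched:sched_switch is just an example tracepoint):
 *
 *   cfg = perf.tracepoint(sys = "sched", name = "sched_switch")
 *   evsel = perf.evsel(type = perf.TYPE_TRACEPOINT, config = cfg,
 *                      freq = 0, sample_period = 1, wakeup_events = 1,
 *                      sample_type = perf.SAMPLE_RAW | perf.SAMPLE_TID)
 */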
1240 
1241 static PyMethodDef perf__methods[] = {
1242 	{
1243 		.ml_name  = "tracepoint",
1244 		.ml_meth  = (PyCFunction) pyrf__tracepoint,
1245 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
1246 		.ml_doc	  = PyDoc_STR("Get tracepoint config.")
1247 	},
1248 	{ .ml_name = NULL, }
1249 };
1250 
1251 #if PY_MAJOR_VERSION < 3
1252 PyMODINIT_FUNC initperf(void)
1253 #else
1254 PyMODINIT_FUNC PyInit_perf(void)
1255 #endif
1256 {
1257 	PyObject *obj;
1258 	int i;
1259 	PyObject *dict;
1260 #if PY_MAJOR_VERSION < 3
1261 	PyObject *module = Py_InitModule("perf", perf__methods);
1262 #else
1263 	static struct PyModuleDef moduledef = {
1264 		PyModuleDef_HEAD_INIT,
1265 		"perf",			/* m_name */
1266 		"",			/* m_doc */
1267 		-1,			/* m_size */
1268 		perf__methods,		/* m_methods */
1269 		NULL,			/* m_reload */
1270 		NULL,			/* m_traverse */
1271 		NULL,			/* m_clear */
1272 		NULL,			/* m_free */
1273 	};
1274 	PyObject *module = PyModule_Create(&moduledef);
1275 #endif
1276 
1277 	if (module == NULL ||
1278 	    pyrf_event__setup_types() < 0 ||
1279 	    pyrf_evlist__setup_types() < 0 ||
1280 	    pyrf_evsel__setup_types() < 0 ||
1281 	    pyrf_thread_map__setup_types() < 0 ||
1282 	    pyrf_cpu_map__setup_types() < 0)
1283 #if PY_MAJOR_VERSION < 3
1284 		return;
1285 #else
1286 		return module;
1287 #endif
1288 
1289 	/* The page_size global lives in a util object file; initialize it here. */
1290 	page_size = sysconf(_SC_PAGE_SIZE);
1291 
1292 	Py_INCREF(&pyrf_evlist__type);
1293 	PyModule_AddObject(module, "evlist", (PyObject*)&pyrf_evlist__type);
1294 
1295 	Py_INCREF(&pyrf_evsel__type);
1296 	PyModule_AddObject(module, "evsel", (PyObject*)&pyrf_evsel__type);
1297 
1298 	Py_INCREF(&pyrf_mmap_event__type);
1299 	PyModule_AddObject(module, "mmap_event", (PyObject *)&pyrf_mmap_event__type);
1300 
1301 	Py_INCREF(&pyrf_lost_event__type);
1302 	PyModule_AddObject(module, "lost_event", (PyObject *)&pyrf_lost_event__type);
1303 
1304 	Py_INCREF(&pyrf_comm_event__type);
1305 	PyModule_AddObject(module, "comm_event", (PyObject *)&pyrf_comm_event__type);
1306 
1307 	Py_INCREF(&pyrf_task_event__type);
1308 	PyModule_AddObject(module, "task_event", (PyObject *)&pyrf_task_event__type);
1309 
1310 	Py_INCREF(&pyrf_throttle_event__type);
1311 	PyModule_AddObject(module, "throttle_event", (PyObject *)&pyrf_throttle_event__type);
1312 
1316 	Py_INCREF(&pyrf_read_event__type);
1317 	PyModule_AddObject(module, "read_event", (PyObject *)&pyrf_read_event__type);
1318 
1319 	Py_INCREF(&pyrf_sample_event__type);
1320 	PyModule_AddObject(module, "sample_event", (PyObject *)&pyrf_sample_event__type);
1321 
1322 	Py_INCREF(&pyrf_context_switch_event__type);
1323 	PyModule_AddObject(module, "switch_event", (PyObject *)&pyrf_context_switch_event__type);
1324 
1325 	Py_INCREF(&pyrf_thread_map__type);
1326 	PyModule_AddObject(module, "thread_map", (PyObject*)&pyrf_thread_map__type);
1327 
1328 	Py_INCREF(&pyrf_cpu_map__type);
1329 	PyModule_AddObject(module, "cpu_map", (PyObject*)&pyrf_cpu_map__type);
1330 
1331 	dict = PyModule_GetDict(module);
1332 	if (dict == NULL)
1333 		goto error;
1334 
1335 	for (i = 0; perf__constants[i].name != NULL; i++) {
1336 		obj = _PyLong_FromLong(perf__constants[i].value);
1337 		if (obj == NULL)
1338 			goto error;
1339 		PyDict_SetItemString(dict, perf__constants[i].name, obj);
1340 		Py_DECREF(obj);
1341 	}
1342 
1343 error:
1344 	if (PyErr_Occurred())
1345 		PyErr_SetString(PyExc_ImportError, "perf: Init failed!");
1346 #if PY_MAJOR_VERSION >= 3
1347 	return module;
1348 #endif
1349 }
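/*
 * The resulting extension module is what scripts such as
 * tools/perf/python/twatch.py import as "perf"; it is normally loaded by
 * pointing PYTHONPATH at the directory where the built perf*.so ends up
 * (the exact location depends on how perf was built).
 */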
1350 
1351 
1352 /* The following are stubs to avoid dragging in builtin-* objects. */
1353 /* TODO: move the code out of the builtin-* file into util. */
1354 
1355 unsigned int scripting_max_stack = PERF_MAX_STACK_DEPTH;
1356 
1357 bool kvm_entry_event(struct evsel *evsel __maybe_unused)
1358 {
1359 	return false;
1360 }
1361 
1362 bool kvm_exit_event(struct evsel *evsel __maybe_unused)
1363 {
1364 	return false;
1365 }
1366 
1367 bool exit_event_begin(struct evsel *evsel __maybe_unused,
1368 		      struct perf_sample *sample  __maybe_unused,
1369 		      struct event_key *key  __maybe_unused)
1370 {
1371 	return false;
1372 }
1373 
1374 bool exit_event_end(struct evsel *evsel __maybe_unused,
1375 		    struct perf_sample *sample __maybe_unused,
1376 		    struct event_key *key __maybe_unused)
1377 {
1378 	return false;
1379 }
1380 
1381 void exit_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
1382 			   struct event_key *key __maybe_unused,
1383 			   char *decode __maybe_unused)
1384 {
1385 }
1386 
1387 int find_scripts(char **scripts_array  __maybe_unused, char **scripts_path_array  __maybe_unused,
1388 		int num  __maybe_unused, int pathlen __maybe_unused)
1389 {
1390 	return -1;
1391 }
1392 
1393 void perf_stat__set_no_csv_summary(int set __maybe_unused)
1394 {
1395 }
1396 
1397 void perf_stat__set_big_num(int set __maybe_unused)
1398 {
1399 }
1400 
1401 int script_spec_register(const char *spec __maybe_unused, struct scripting_ops *ops __maybe_unused)
1402 {
1403 	return -1;
1404 }
1405 
1406 arch_syscalls__strerrno_t *arch_syscalls__strerrno_function(const char *arch __maybe_unused)
1407 {
1408 	return NULL;
1409 }
1410 
1411 struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork __maybe_unused,
1412 				       struct kwork_class *class __maybe_unused,
1413 				       struct kwork_work *key  __maybe_unused)
1414 {
1415 	return NULL;
1416 }
1417 
1418 void script_fetch_insn(struct perf_sample *sample __maybe_unused,
1419 		struct thread *thread __maybe_unused,
1420 		struct machine *machine __maybe_unused)
1421 {
1422 }
1423 
1424 int perf_sample__sprintf_flags(u32 flags __maybe_unused, char *str __maybe_unused,
1425 			size_t sz __maybe_unused)
1426 {
1427 	return -1;
1428 }
1429 
1430 bool match_callstack_filter(struct machine *machine __maybe_unused, u64 *callstack __maybe_unused)
1431 {
1432 	return false;
1433 }
1434 
1435 struct lock_stat *lock_stat_find(u64 addr __maybe_unused)
1436 {
1437 	return NULL;
1438 }
1439 
1440 struct lock_stat *lock_stat_findnew(u64 addr __maybe_unused, const char *name __maybe_unused,
1441 				int flags __maybe_unused)
1442 {
1443 	return NULL;
1444 }
1445 
1446 int cmd_inject(int argc __maybe_unused, const char *argv[] __maybe_unused)
1447 {
1448 	return -1;
1449 }
1450