xref: /linux/tools/perf/util/python.c (revision a3a02a52bcfcbcc4a637d4b68bf1bc391c9fad02)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <Python.h>
3 #include <structmember.h>
4 #include <inttypes.h>
5 #include <poll.h>
6 #include <linux/err.h>
7 #include <perf/cpumap.h>
8 #ifdef HAVE_LIBTRACEEVENT
9 #include <traceevent/event-parse.h>
10 #endif
11 #include <perf/mmap.h>
12 #include "evlist.h"
13 #include "evsel.h"
14 #include "event.h"
15 #include "print_binary.h"
16 #include "thread_map.h"
17 #include "trace-event.h"
18 #include "mmap.h"
19 #include "util/bpf-filter.h"
20 #include "util/env.h"
21 #include "util/kvm-stat.h"
22 #include "util/kwork.h"
23 #include "util/sample.h"
24 #include "util/lock-contention.h"
25 #include <internal/lib.h>
26 #include "../builtin.h"
27 
28 #if PY_MAJOR_VERSION < 3
29 #define _PyUnicode_FromString(arg) \
30   PyString_FromString(arg)
31 #define _PyUnicode_AsString(arg) \
32   PyString_AsString(arg)
33 #define _PyUnicode_FromFormat(...) \
34   PyString_FromFormat(__VA_ARGS__)
35 #define _PyLong_FromLong(arg) \
36   PyInt_FromLong(arg)
37 
38 #else
39 
40 #define _PyUnicode_FromString(arg) \
41   PyUnicode_FromString(arg)
#define _PyUnicode_AsString(arg) \
  PyUnicode_AsUTF8(arg)
42 #define _PyUnicode_FromFormat(...) \
43   PyUnicode_FromFormat(__VA_ARGS__)
44 #define _PyLong_FromLong(arg) \
45   PyLong_FromLong(arg)
46 #endif
47 
48 #ifndef Py_TYPE
49 #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
50 #endif
51 
52 /* Define PyVarObject_HEAD_INIT for python 2.5 */
53 #ifndef PyVarObject_HEAD_INIT
54 # define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size,
55 #endif
56 
57 #if PY_MAJOR_VERSION < 3
58 PyMODINIT_FUNC initperf(void);
59 #else
60 PyMODINIT_FUNC PyInit_perf(void);
61 #endif
62 
63 #define member_def(type, member, ptype, help) \
64 	{ #member, ptype, \
65 	  offsetof(struct pyrf_event, event) + offsetof(struct type, member), \
66 	  0, help }
67 
68 #define sample_member_def(name, member, ptype, help) \
69 	{ #name, ptype, \
70 	  offsetof(struct pyrf_event, sample) + offsetof(struct perf_sample, member), \
71 	  0, help }
72 
73 struct pyrf_event {
74 	PyObject_HEAD
75 	struct evsel *evsel;
76 	struct perf_sample sample;
77 	union perf_event   event;
78 };
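
/*
 * Note that member_def()/sample_member_def() above compute the offset of a
 * field inside the embedded 'event' union / 'sample' struct relative to the
 * start of struct pyrf_event, which is exactly what PyMemberDef expects, so
 * the raw C fields can be exposed as Python attributes without per-field
 * getter code.
 */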
79 
80 #define sample_members \
81 	sample_member_def(sample_ip, ip, T_ULONGLONG, "event ip"),			 \
82 	sample_member_def(sample_pid, pid, T_INT, "event pid"),			 \
83 	sample_member_def(sample_tid, tid, T_INT, "event tid"),			 \
84 	sample_member_def(sample_time, time, T_ULONGLONG, "event timestamp"),		 \
85 	sample_member_def(sample_addr, addr, T_ULONGLONG, "event addr"),		 \
86 	sample_member_def(sample_id, id, T_ULONGLONG, "event id"),			 \
87 	sample_member_def(sample_stream_id, stream_id, T_ULONGLONG, "event stream id"), \
88 	sample_member_def(sample_period, period, T_ULONGLONG, "event period"),		 \
89 	sample_member_def(sample_cpu, cpu, T_UINT, "event cpu"),
90 
91 static char pyrf_mmap_event__doc[] = PyDoc_STR("perf mmap event object.");
92 
93 static PyMemberDef pyrf_mmap_event__members[] = {
94 	sample_members
95 	member_def(perf_event_header, type, T_UINT, "event type"),
96 	member_def(perf_event_header, misc, T_UINT, "event misc"),
97 	member_def(perf_record_mmap, pid, T_UINT, "event pid"),
98 	member_def(perf_record_mmap, tid, T_UINT, "event tid"),
99 	member_def(perf_record_mmap, start, T_ULONGLONG, "start of the map"),
100 	member_def(perf_record_mmap, len, T_ULONGLONG, "map length"),
101 	member_def(perf_record_mmap, pgoff, T_ULONGLONG, "page offset"),
102 	member_def(perf_record_mmap, filename, T_STRING_INPLACE, "backing store"),
103 	{ .name = NULL, },
104 };
105 
106 static PyObject *pyrf_mmap_event__repr(struct pyrf_event *pevent)
107 {
108 	PyObject *ret;
109 	char *s;
110 
111 	if (asprintf(&s, "{ type: mmap, pid: %u, tid: %u, start: %#" PRI_lx64 ", "
112 			 "length: %#" PRI_lx64 ", offset: %#" PRI_lx64 ", "
113 			 "filename: %s }",
114 		     pevent->event.mmap.pid, pevent->event.mmap.tid,
115 		     pevent->event.mmap.start, pevent->event.mmap.len,
116 		     pevent->event.mmap.pgoff, pevent->event.mmap.filename) < 0) {
117 		ret = PyErr_NoMemory();
118 	} else {
119 		ret = _PyUnicode_FromString(s);
120 		free(s);
121 	}
122 	return ret;
123 }
124 
125 static PyTypeObject pyrf_mmap_event__type = {
126 	PyVarObject_HEAD_INIT(NULL, 0)
127 	.tp_name	= "perf.mmap_event",
128 	.tp_basicsize	= sizeof(struct pyrf_event),
129 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
130 	.tp_doc		= pyrf_mmap_event__doc,
131 	.tp_members	= pyrf_mmap_event__members,
132 	.tp_repr	= (reprfunc)pyrf_mmap_event__repr,
133 };
134 
135 static char pyrf_task_event__doc[] = PyDoc_STR("perf task (fork/exit) event object.");
136 
137 static PyMemberDef pyrf_task_event__members[] = {
138 	sample_members
139 	member_def(perf_event_header, type, T_UINT, "event type"),
140 	member_def(perf_record_fork, pid, T_UINT, "event pid"),
141 	member_def(perf_record_fork, ppid, T_UINT, "event ppid"),
142 	member_def(perf_record_fork, tid, T_UINT, "event tid"),
143 	member_def(perf_record_fork, ptid, T_UINT, "event ptid"),
144 	member_def(perf_record_fork, time, T_ULONGLONG, "timestamp"),
145 	{ .name = NULL, },
146 };
147 
148 static PyObject *pyrf_task_event__repr(struct pyrf_event *pevent)
149 {
150 	return _PyUnicode_FromFormat("{ type: %s, pid: %u, ppid: %u, tid: %u, "
151 				   "ptid: %u, time: %" PRI_lu64 "}",
152 				   pevent->event.header.type == PERF_RECORD_FORK ? "fork" : "exit",
153 				   pevent->event.fork.pid,
154 				   pevent->event.fork.ppid,
155 				   pevent->event.fork.tid,
156 				   pevent->event.fork.ptid,
157 				   pevent->event.fork.time);
158 }
159 
160 static PyTypeObject pyrf_task_event__type = {
161 	PyVarObject_HEAD_INIT(NULL, 0)
162 	.tp_name	= "perf.task_event",
163 	.tp_basicsize	= sizeof(struct pyrf_event),
164 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
165 	.tp_doc		= pyrf_task_event__doc,
166 	.tp_members	= pyrf_task_event__members,
167 	.tp_repr	= (reprfunc)pyrf_task_event__repr,
168 };
169 
170 static char pyrf_comm_event__doc[] = PyDoc_STR("perf comm event object.");
171 
172 static PyMemberDef pyrf_comm_event__members[] = {
173 	sample_members
174 	member_def(perf_event_header, type, T_UINT, "event type"),
175 	member_def(perf_record_comm, pid, T_UINT, "event pid"),
176 	member_def(perf_record_comm, tid, T_UINT, "event tid"),
177 	member_def(perf_record_comm, comm, T_STRING_INPLACE, "process name"),
178 	{ .name = NULL, },
179 };
180 
181 static PyObject *pyrf_comm_event__repr(struct pyrf_event *pevent)
182 {
183 	return _PyUnicode_FromFormat("{ type: comm, pid: %u, tid: %u, comm: %s }",
184 				   pevent->event.comm.pid,
185 				   pevent->event.comm.tid,
186 				   pevent->event.comm.comm);
187 }
188 
189 static PyTypeObject pyrf_comm_event__type = {
190 	PyVarObject_HEAD_INIT(NULL, 0)
191 	.tp_name	= "perf.comm_event",
192 	.tp_basicsize	= sizeof(struct pyrf_event),
193 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
194 	.tp_doc		= pyrf_comm_event__doc,
195 	.tp_members	= pyrf_comm_event__members,
196 	.tp_repr	= (reprfunc)pyrf_comm_event__repr,
197 };
198 
199 static char pyrf_throttle_event__doc[] = PyDoc_STR("perf throttle event object.");
200 
201 static PyMemberDef pyrf_throttle_event__members[] = {
202 	sample_members
203 	member_def(perf_event_header, type, T_UINT, "event type"),
204 	member_def(perf_record_throttle, time, T_ULONGLONG, "timestamp"),
205 	member_def(perf_record_throttle, id, T_ULONGLONG, "event id"),
206 	member_def(perf_record_throttle, stream_id, T_ULONGLONG, "event stream id"),
207 	{ .name = NULL, },
208 };
209 
210 static PyObject *pyrf_throttle_event__repr(struct pyrf_event *pevent)
211 {
212 	struct perf_record_throttle *te = (struct perf_record_throttle *)(&pevent->event.header + 1);
213 
214 	return _PyUnicode_FromFormat("{ type: %sthrottle, time: %" PRI_lu64 ", id: %" PRI_lu64
215 				   ", stream_id: %" PRI_lu64 " }",
216 				   pevent->event.header.type == PERF_RECORD_THROTTLE ? "" : "un",
217 				   te->time, te->id, te->stream_id);
218 }
219 
220 static PyTypeObject pyrf_throttle_event__type = {
221 	PyVarObject_HEAD_INIT(NULL, 0)
222 	.tp_name	= "perf.throttle_event",
223 	.tp_basicsize	= sizeof(struct pyrf_event),
224 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
225 	.tp_doc		= pyrf_throttle_event__doc,
226 	.tp_members	= pyrf_throttle_event__members,
227 	.tp_repr	= (reprfunc)pyrf_throttle_event__repr,
228 };
229 
230 static char pyrf_lost_event__doc[] = PyDoc_STR("perf lost event object.");
231 
232 static PyMemberDef pyrf_lost_event__members[] = {
233 	sample_members
234 	member_def(perf_record_lost, id, T_ULONGLONG, "event id"),
235 	member_def(perf_record_lost, lost, T_ULONGLONG, "number of lost events"),
236 	{ .name = NULL, },
237 };
238 
239 static PyObject *pyrf_lost_event__repr(struct pyrf_event *pevent)
240 {
241 	PyObject *ret;
242 	char *s;
243 
244 	if (asprintf(&s, "{ type: lost, id: %#" PRI_lx64 ", "
245 			 "lost: %#" PRI_lx64 " }",
246 		     pevent->event.lost.id, pevent->event.lost.lost) < 0) {
247 		ret = PyErr_NoMemory();
248 	} else {
249 		ret = _PyUnicode_FromString(s);
250 		free(s);
251 	}
252 	return ret;
253 }
254 
255 static PyTypeObject pyrf_lost_event__type = {
256 	PyVarObject_HEAD_INIT(NULL, 0)
257 	.tp_name	= "perf.lost_event",
258 	.tp_basicsize	= sizeof(struct pyrf_event),
259 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
260 	.tp_doc		= pyrf_lost_event__doc,
261 	.tp_members	= pyrf_lost_event__members,
262 	.tp_repr	= (reprfunc)pyrf_lost_event__repr,
263 };
264 
265 static char pyrf_read_event__doc[] = PyDoc_STR("perf read event object.");
266 
267 static PyMemberDef pyrf_read_event__members[] = {
268 	sample_members
269 	member_def(perf_record_read, pid, T_UINT, "event pid"),
270 	member_def(perf_record_read, tid, T_UINT, "event tid"),
271 	{ .name = NULL, },
272 };
273 
274 static PyObject *pyrf_read_event__repr(struct pyrf_event *pevent)
275 {
276 	return _PyUnicode_FromFormat("{ type: read, pid: %u, tid: %u }",
277 				   pevent->event.read.pid,
278 				   pevent->event.read.tid);
279 	/*
280  	 * FIXME: return the array of read values,
281  	 * making this method useful ;-)
282  	 */
283 }
284 
285 static PyTypeObject pyrf_read_event__type = {
286 	PyVarObject_HEAD_INIT(NULL, 0)
287 	.tp_name	= "perf.read_event",
288 	.tp_basicsize	= sizeof(struct pyrf_event),
289 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
290 	.tp_doc		= pyrf_read_event__doc,
291 	.tp_members	= pyrf_read_event__members,
292 	.tp_repr	= (reprfunc)pyrf_read_event__repr,
293 };
294 
295 static char pyrf_sample_event__doc[] = PyDoc_STR("perf sample event object.");
296 
297 static PyMemberDef pyrf_sample_event__members[] = {
298 	sample_members
299 	member_def(perf_event_header, type, T_UINT, "event type"),
300 	{ .name = NULL, },
301 };
302 
303 static PyObject *pyrf_sample_event__repr(struct pyrf_event *pevent)
304 {
305 	PyObject *ret;
306 	char *s;
307 
308 	if (asprintf(&s, "{ type: sample }") < 0) {
309 		ret = PyErr_NoMemory();
310 	} else {
311 		ret = _PyUnicode_FromString(s);
312 		free(s);
313 	}
314 	return ret;
315 }
316 
317 #ifdef HAVE_LIBTRACEEVENT
318 static bool is_tracepoint(struct pyrf_event *pevent)
319 {
320 	return pevent->evsel->core.attr.type == PERF_TYPE_TRACEPOINT;
321 }
322 
323 static PyObject*
324 tracepoint_field(struct pyrf_event *pe, struct tep_format_field *field)
325 {
326 	struct tep_handle *pevent = field->event->tep;
327 	void *data = pe->sample.raw_data;
328 	PyObject *ret = NULL;
329 	unsigned long long val;
330 	unsigned int offset, len;
331 
332 	if (field->flags & TEP_FIELD_IS_ARRAY) {
333 		offset = field->offset;
334 		len    = field->size;
335 		if (field->flags & TEP_FIELD_IS_DYNAMIC) {
336 			val     = tep_read_number(pevent, data + offset, len);
337 			offset  = val;
338 			len     = offset >> 16;
339 			offset &= 0xffff;
340 			if (tep_field_is_relative(field->flags))
341 				offset += field->offset + field->size;
342 		}
343 		if (field->flags & TEP_FIELD_IS_STRING &&
344 		    is_printable_array(data + offset, len)) {
345 			ret = _PyUnicode_FromString((char *)data + offset);
346 		} else {
347 			ret = PyByteArray_FromStringAndSize((const char *) data + offset, len);
348 			field->flags &= ~TEP_FIELD_IS_STRING;
349 		}
350 	} else {
351 		val = tep_read_number(pevent, data + field->offset,
352 				      field->size);
353 		if (field->flags & TEP_FIELD_IS_POINTER)
354 			ret = PyLong_FromUnsignedLong((unsigned long) val);
355 		else if (field->flags & TEP_FIELD_IS_SIGNED)
356 			ret = PyLong_FromLong((long) val);
357 		else
358 			ret = PyLong_FromUnsignedLong((unsigned long) val);
359 	}
360 
361 	return ret;
362 }
363 
364 static PyObject*
365 get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name)
366 {
367 	const char *str = _PyUnicode_AsString(PyObject_Str(attr_name));
368 	struct evsel *evsel = pevent->evsel;
369 	struct tep_format_field *field;
370 
371 	if (!evsel->tp_format) {
372 		struct tep_event *tp_format;
373 
374 		tp_format = trace_event__tp_format_id(evsel->core.attr.config);
375 		if (IS_ERR_OR_NULL(tp_format))
376 			return NULL;
377 
378 		evsel->tp_format = tp_format;
379 	}
380 
381 	field = tep_find_any_field(evsel->tp_format, str);
382 	if (!field)
383 		return NULL;
384 
385 	return tracepoint_field(pevent, field);
386 }
387 #endif /* HAVE_LIBTRACEEVENT */
388 
389 static PyObject*
390 pyrf_sample_event__getattro(struct pyrf_event *pevent, PyObject *attr_name)
391 {
392 	PyObject *obj = NULL;
393 
394 #ifdef HAVE_LIBTRACEEVENT
395 	if (is_tracepoint(pevent))
396 		obj = get_tracepoint_field(pevent, attr_name);
397 #endif
398 
399 	return obj ?: PyObject_GenericGetAttr((PyObject *) pevent, attr_name);
400 }
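
/*
 * Together with tp_getattro below, this lets a tracepoint sample expose its
 * raw fields as attributes, looked up by name in the evsel's event format.
 * Usage sketch (not part of the build), assuming a sched:sched_switch
 * tracepoint evsel was set up as in tools/perf/python/tracepoint.py:
 *
 *   event = evlist.read_on_cpu(cpu)
 *   if isinstance(event, perf.sample_event):
 *       print(event.prev_comm, event.next_pid)
 */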
401 
402 static PyTypeObject pyrf_sample_event__type = {
403 	PyVarObject_HEAD_INIT(NULL, 0)
404 	.tp_name	= "perf.sample_event",
405 	.tp_basicsize	= sizeof(struct pyrf_event),
406 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
407 	.tp_doc		= pyrf_sample_event__doc,
408 	.tp_members	= pyrf_sample_event__members,
409 	.tp_repr	= (reprfunc)pyrf_sample_event__repr,
410 	.tp_getattro	= (getattrofunc) pyrf_sample_event__getattro,
411 };
412 
413 static char pyrf_context_switch_event__doc[] = PyDoc_STR("perf context_switch event object.");
414 
415 static PyMemberDef pyrf_context_switch_event__members[] = {
416 	sample_members
417 	member_def(perf_event_header, type, T_UINT, "event type"),
418 	member_def(perf_record_switch, next_prev_pid, T_UINT, "next/prev pid"),
419 	member_def(perf_record_switch, next_prev_tid, T_UINT, "next/prev tid"),
420 	{ .name = NULL, },
421 };
422 
423 static PyObject *pyrf_context_switch_event__repr(struct pyrf_event *pevent)
424 {
425 	PyObject *ret;
426 	char *s;
427 
428 	if (asprintf(&s, "{ type: context_switch, next_prev_pid: %u, next_prev_tid: %u, switch_out: %u }",
429 		     pevent->event.context_switch.next_prev_pid,
430 		     pevent->event.context_switch.next_prev_tid,
431 		     !!(pevent->event.header.misc & PERF_RECORD_MISC_SWITCH_OUT)) < 0) {
432 		ret = PyErr_NoMemory();
433 	} else {
434 		ret = _PyUnicode_FromString(s);
435 		free(s);
436 	}
437 	return ret;
438 }
439 
440 static PyTypeObject pyrf_context_switch_event__type = {
441 	PyVarObject_HEAD_INIT(NULL, 0)
442 	.tp_name	= "perf.context_switch_event",
443 	.tp_basicsize	= sizeof(struct pyrf_event),
444 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
445 	.tp_doc		= pyrf_context_switch_event__doc,
446 	.tp_members	= pyrf_context_switch_event__members,
447 	.tp_repr	= (reprfunc)pyrf_context_switch_event__repr,
448 };
449 
450 static int pyrf_event__setup_types(void)
451 {
452 	int err;
453 	pyrf_mmap_event__type.tp_new =
454 	pyrf_task_event__type.tp_new =
455 	pyrf_comm_event__type.tp_new =
456 	pyrf_lost_event__type.tp_new =
457 	pyrf_read_event__type.tp_new =
458 	pyrf_sample_event__type.tp_new =
459 	pyrf_context_switch_event__type.tp_new =
460 	pyrf_throttle_event__type.tp_new = PyType_GenericNew;
461 	err = PyType_Ready(&pyrf_mmap_event__type);
462 	if (err < 0)
463 		goto out;
464 	err = PyType_Ready(&pyrf_lost_event__type);
465 	if (err < 0)
466 		goto out;
467 	err = PyType_Ready(&pyrf_task_event__type);
468 	if (err < 0)
469 		goto out;
470 	err = PyType_Ready(&pyrf_comm_event__type);
471 	if (err < 0)
472 		goto out;
473 	err = PyType_Ready(&pyrf_throttle_event__type);
474 	if (err < 0)
475 		goto out;
476 	err = PyType_Ready(&pyrf_read_event__type);
477 	if (err < 0)
478 		goto out;
479 	err = PyType_Ready(&pyrf_sample_event__type);
480 	if (err < 0)
481 		goto out;
482 	err = PyType_Ready(&pyrf_context_switch_event__type);
483 	if (err < 0)
484 		goto out;
485 out:
486 	return err;
487 }
488 
489 static PyTypeObject *pyrf_event__type[] = {
490 	[PERF_RECORD_MMAP]	 = &pyrf_mmap_event__type,
491 	[PERF_RECORD_LOST]	 = &pyrf_lost_event__type,
492 	[PERF_RECORD_COMM]	 = &pyrf_comm_event__type,
493 	[PERF_RECORD_EXIT]	 = &pyrf_task_event__type,
494 	[PERF_RECORD_THROTTLE]	 = &pyrf_throttle_event__type,
495 	[PERF_RECORD_UNTHROTTLE] = &pyrf_throttle_event__type,
496 	[PERF_RECORD_FORK]	 = &pyrf_task_event__type,
497 	[PERF_RECORD_READ]	 = &pyrf_read_event__type,
498 	[PERF_RECORD_SAMPLE]	 = &pyrf_sample_event__type,
499 	[PERF_RECORD_SWITCH]	 = &pyrf_context_switch_event__type,
500 	[PERF_RECORD_SWITCH_CPU_WIDE]  = &pyrf_context_switch_event__type,
501 };
502 
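/*
 * Wrap a raw ring buffer record in the Python type matching its header.type
 * (see the table above) and copy header.size bytes of it. The 'sample' part
 * of struct pyrf_event is filled in later, by pyrf_evlist__read_on_cpu().
 */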
503 static PyObject *pyrf_event__new(union perf_event *event)
504 {
505 	struct pyrf_event *pevent;
506 	PyTypeObject *ptype;
507 
508 	if ((event->header.type < PERF_RECORD_MMAP ||
509 	     event->header.type > PERF_RECORD_SAMPLE) &&
510 	    !(event->header.type == PERF_RECORD_SWITCH ||
511 	      event->header.type == PERF_RECORD_SWITCH_CPU_WIDE))
512 		return NULL;
513 
514 	ptype = pyrf_event__type[event->header.type];
515 	pevent = PyObject_New(struct pyrf_event, ptype);
516 	if (pevent != NULL)
517 		memcpy(&pevent->event, event, event->header.size);
518 	return (PyObject *)pevent;
519 }
520 
521 struct pyrf_cpu_map {
522 	PyObject_HEAD
523 
524 	struct perf_cpu_map *cpus;
525 };
526 
527 static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus,
528 			      PyObject *args, PyObject *kwargs)
529 {
530 	static char *kwlist[] = { "cpustr", NULL };
531 	char *cpustr = NULL;
532 
533 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|s",
534 					 kwlist, &cpustr))
535 		return -1;
536 
537 	pcpus->cpus = perf_cpu_map__new(cpustr);
538 	if (pcpus->cpus == NULL)
539 		return -1;
540 	return 0;
541 }
542 
543 static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus)
544 {
545 	perf_cpu_map__put(pcpus->cpus);
546 	Py_TYPE(pcpus)->tp_free((PyObject*)pcpus);
547 }
548 
549 static Py_ssize_t pyrf_cpu_map__length(PyObject *obj)
550 {
551 	struct pyrf_cpu_map *pcpus = (void *)obj;
552 
553 	return perf_cpu_map__nr(pcpus->cpus);
554 }
555 
556 static PyObject *pyrf_cpu_map__item(PyObject *obj, Py_ssize_t i)
557 {
558 	struct pyrf_cpu_map *pcpus = (void *)obj;
559 
560 	if (i >= perf_cpu_map__nr(pcpus->cpus))
561 		return NULL;
562 
563 	return Py_BuildValue("i", perf_cpu_map__cpu(pcpus->cpus, i).cpu);
564 }
565 
566 static PySequenceMethods pyrf_cpu_map__sequence_methods = {
567 	.sq_length = pyrf_cpu_map__length,
568 	.sq_item   = pyrf_cpu_map__item,
569 };
570 
571 static char pyrf_cpu_map__doc[] = PyDoc_STR("cpu map object.");
572 
573 static PyTypeObject pyrf_cpu_map__type = {
574 	PyVarObject_HEAD_INIT(NULL, 0)
575 	.tp_name	= "perf.cpu_map",
576 	.tp_basicsize	= sizeof(struct pyrf_cpu_map),
577 	.tp_dealloc	= (destructor)pyrf_cpu_map__delete,
578 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
579 	.tp_doc		= pyrf_cpu_map__doc,
580 	.tp_as_sequence	= &pyrf_cpu_map__sequence_methods,
581 	.tp_init	= (initproc)pyrf_cpu_map__init,
582 };
583 
584 static int pyrf_cpu_map__setup_types(void)
585 {
586 	pyrf_cpu_map__type.tp_new = PyType_GenericNew;
587 	return PyType_Ready(&pyrf_cpu_map__type);
588 }
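
/*
 * Usage sketch (not part of the build), assuming the 'perf' module built
 * from this file is importable: a cpu map behaves as a read-only sequence
 * of cpu numbers.
 *
 *   import perf
 *   cpus = perf.cpu_map("0-3")	# or perf.cpu_map() for the online cpus
 *   print(len(cpus), cpus[0])
 */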
589 
590 struct pyrf_thread_map {
591 	PyObject_HEAD
592 
593 	struct perf_thread_map *threads;
594 };
595 
596 static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads,
597 				 PyObject *args, PyObject *kwargs)
598 {
599 	static char *kwlist[] = { "pid", "tid", "uid", NULL };
600 	int pid = -1, tid = -1, uid = UINT_MAX;
601 
602 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iii",
603 					 kwlist, &pid, &tid, &uid))
604 		return -1;
605 
606 	pthreads->threads = thread_map__new(pid, tid, uid);
607 	if (pthreads->threads == NULL)
608 		return -1;
609 	return 0;
610 }
611 
612 static void pyrf_thread_map__delete(struct pyrf_thread_map *pthreads)
613 {
614 	perf_thread_map__put(pthreads->threads);
615 	Py_TYPE(pthreads)->tp_free((PyObject*)pthreads);
616 }
617 
618 static Py_ssize_t pyrf_thread_map__length(PyObject *obj)
619 {
620 	struct pyrf_thread_map *pthreads = (void *)obj;
621 
622 	return perf_thread_map__nr(pthreads->threads);
623 }
624 
625 static PyObject *pyrf_thread_map__item(PyObject *obj, Py_ssize_t i)
626 {
627 	struct pyrf_thread_map *pthreads = (void *)obj;
628 
629 	if (i >= perf_thread_map__nr(pthreads->threads))
630 		return NULL;
631 
632 	return Py_BuildValue("i", perf_thread_map__pid(pthreads->threads, i));
633 }
634 
635 static PySequenceMethods pyrf_thread_map__sequence_methods = {
636 	.sq_length = pyrf_thread_map__length,
637 	.sq_item   = pyrf_thread_map__item,
638 };
639 
640 static char pyrf_thread_map__doc[] = PyDoc_STR("thread map object.");
641 
642 static PyTypeObject pyrf_thread_map__type = {
643 	PyVarObject_HEAD_INIT(NULL, 0)
644 	.tp_name	= "perf.thread_map",
645 	.tp_basicsize	= sizeof(struct pyrf_thread_map),
646 	.tp_dealloc	= (destructor)pyrf_thread_map__delete,
647 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
648 	.tp_doc		= pyrf_thread_map__doc,
649 	.tp_as_sequence	= &pyrf_thread_map__sequence_methods,
650 	.tp_init	= (initproc)pyrf_thread_map__init,
651 };
652 
653 static int pyrf_thread_map__setup_types(void)
654 {
655 	pyrf_thread_map__type.tp_new = PyType_GenericNew;
656 	return PyType_Ready(&pyrf_thread_map__type);
657 }
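
/*
 * Usage sketch (not part of the build): a thread map is likewise a read-only
 * sequence, here built for a single existing process.
 *
 *   import os, perf
 *   threads = perf.thread_map(pid=os.getpid())
 *   print(len(threads), threads[0])
 */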
658 
659 struct pyrf_evsel {
660 	PyObject_HEAD
661 
662 	struct evsel evsel;
663 };
664 
665 static int pyrf_evsel__init(struct pyrf_evsel *pevsel,
666 			    PyObject *args, PyObject *kwargs)
667 {
668 	struct perf_event_attr attr = {
669 		.type = PERF_TYPE_HARDWARE,
670 		.config = PERF_COUNT_HW_CPU_CYCLES,
671 		.sample_type = PERF_SAMPLE_PERIOD | PERF_SAMPLE_TID,
672 	};
673 	static char *kwlist[] = {
674 		"type",
675 		"config",
676 		"sample_freq",
677 		"sample_period",
678 		"sample_type",
679 		"read_format",
680 		"disabled",
681 		"inherit",
682 		"pinned",
683 		"exclusive",
684 		"exclude_user",
685 		"exclude_kernel",
686 		"exclude_hv",
687 		"exclude_idle",
688 		"mmap",
689 		"context_switch",
690 		"comm",
691 		"freq",
692 		"inherit_stat",
693 		"enable_on_exec",
694 		"task",
695 		"watermark",
696 		"precise_ip",
697 		"mmap_data",
698 		"sample_id_all",
699 		"wakeup_events",
700 		"bp_type",
701 		"bp_addr",
702 		"bp_len",
703 		 NULL
704 	};
705 	u64 sample_period = 0;
706 	u32 disabled = 0,
707 	    inherit = 0,
708 	    pinned = 0,
709 	    exclusive = 0,
710 	    exclude_user = 0,
711 	    exclude_kernel = 0,
712 	    exclude_hv = 0,
713 	    exclude_idle = 0,
714 	    mmap = 0,
715 	    context_switch = 0,
716 	    comm = 0,
717 	    freq = 1,
718 	    inherit_stat = 0,
719 	    enable_on_exec = 0,
720 	    task = 0,
721 	    watermark = 0,
722 	    precise_ip = 0,
723 	    mmap_data = 0,
724 	    sample_id_all = 1;
725 	int idx = 0;
726 
727 	if (!PyArg_ParseTupleAndKeywords(args, kwargs,
728 					 "|iKiKKiiiiiiiiiiiiiiiiiiiiiiKK", kwlist,
729 					 &attr.type, &attr.config, &attr.sample_freq,
730 					 &sample_period, &attr.sample_type,
731 					 &attr.read_format, &disabled, &inherit,
732 					 &pinned, &exclusive, &exclude_user,
733 					 &exclude_kernel, &exclude_hv, &exclude_idle,
734 					 &mmap, &context_switch, &comm, &freq, &inherit_stat,
735 					 &enable_on_exec, &task, &watermark,
736 					 &precise_ip, &mmap_data, &sample_id_all,
737 					 &attr.wakeup_events, &attr.bp_type,
738 					 &attr.bp_addr, &attr.bp_len, &idx))
739 		return -1;
740 
741 	/* sample_period and sample_freq share a union in perf_event_attr. */
742 	if (sample_period != 0) {
743 		if (attr.sample_freq != 0)
744 			return -1; /* FIXME: throw right exception */
745 		attr.sample_period = sample_period;
746 	}
747 
748 	/* Bitfields */
749 	attr.disabled	    = disabled;
750 	attr.inherit	    = inherit;
751 	attr.pinned	    = pinned;
752 	attr.exclusive	    = exclusive;
753 	attr.exclude_user   = exclude_user;
754 	attr.exclude_kernel = exclude_kernel;
755 	attr.exclude_hv	    = exclude_hv;
756 	attr.exclude_idle   = exclude_idle;
757 	attr.mmap	    = mmap;
758 	attr.context_switch = context_switch;
759 	attr.comm	    = comm;
760 	attr.freq	    = freq;
761 	attr.inherit_stat   = inherit_stat;
762 	attr.enable_on_exec = enable_on_exec;
763 	attr.task	    = task;
764 	attr.watermark	    = watermark;
765 	attr.precise_ip	    = precise_ip;
766 	attr.mmap_data	    = mmap_data;
767 	attr.sample_id_all  = sample_id_all;
768 	attr.size	    = sizeof(attr);
769 
770 	evsel__init(&pevsel->evsel, &attr, idx);
771 	return 0;
772 }
773 
774 static void pyrf_evsel__delete(struct pyrf_evsel *pevsel)
775 {
776 	evsel__exit(&pevsel->evsel);
777 	Py_TYPE(pevsel)->tp_free((PyObject*)pevsel);
778 }
779 
780 static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
781 				  PyObject *args, PyObject *kwargs)
782 {
783 	struct evsel *evsel = &pevsel->evsel;
784 	struct perf_cpu_map *cpus = NULL;
785 	struct perf_thread_map *threads = NULL;
786 	PyObject *pcpus = NULL, *pthreads = NULL;
787 	int group = 0, inherit = 0;
788 	static char *kwlist[] = { "cpus", "threads", "group", "inherit", NULL };
789 
790 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist,
791 					 &pcpus, &pthreads, &group, &inherit))
792 		return NULL;
793 
794 	if (pthreads != NULL)
795 		threads = ((struct pyrf_thread_map *)pthreads)->threads;
796 
797 	if (pcpus != NULL)
798 		cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
799 
800 	evsel->core.attr.inherit = inherit;
801 	/*
802 	 * This will group just the fds for this single evsel, to group
803 	 * multiple events, use evlist.open().
804 	 */
805 	if (evsel__open(evsel, cpus, threads) < 0) {
806 		PyErr_SetFromErrno(PyExc_OSError);
807 		return NULL;
808 	}
809 
810 	Py_INCREF(Py_None);
811 	return Py_None;
812 }
813 
814 static PyMethodDef pyrf_evsel__methods[] = {
815 	{
816 		.ml_name  = "open",
817 		.ml_meth  = (PyCFunction)pyrf_evsel__open,
818 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
819 		.ml_doc	  = PyDoc_STR("open the event selector file descriptor table.")
820 	},
821 	{ .ml_name = NULL, }
822 };
823 
824 static char pyrf_evsel__doc[] = PyDoc_STR("perf event selector object.");
825 
826 static PyTypeObject pyrf_evsel__type = {
827 	PyVarObject_HEAD_INIT(NULL, 0)
828 	.tp_name	= "perf.evsel",
829 	.tp_basicsize	= sizeof(struct pyrf_evsel),
830 	.tp_dealloc	= (destructor)pyrf_evsel__delete,
831 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
832 	.tp_doc		= pyrf_evsel__doc,
833 	.tp_methods	= pyrf_evsel__methods,
834 	.tp_init	= (initproc)pyrf_evsel__init,
835 };
836 
837 static int pyrf_evsel__setup_types(void)
838 {
839 	pyrf_evsel__type.tp_new = PyType_GenericNew;
840 	return PyType_Ready(&pyrf_evsel__type);
841 }
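
/*
 * Usage sketch (not part of the build): the keyword arguments accepted by
 * pyrf_evsel__init() map straight onto perf_event_attr fields, e.g.:
 *
 *   import perf
 *   ev = perf.evsel(type=perf.TYPE_SOFTWARE,
 *                   config=perf.COUNT_SW_TASK_CLOCK,
 *                   freq=0, sample_period=100000,
 *                   sample_type=perf.SAMPLE_TID | perf.SAMPLE_CPU)
 *   ev.open(cpus=perf.cpu_map(), threads=perf.thread_map())
 */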
842 
843 struct pyrf_evlist {
844 	PyObject_HEAD
845 
846 	struct evlist evlist;
847 };
848 
849 static int pyrf_evlist__init(struct pyrf_evlist *pevlist,
850 			     PyObject *args, PyObject *kwargs __maybe_unused)
851 {
852 	PyObject *pcpus = NULL, *pthreads = NULL;
853 	struct perf_cpu_map *cpus;
854 	struct perf_thread_map *threads;
855 
856 	if (!PyArg_ParseTuple(args, "OO", &pcpus, &pthreads))
857 		return -1;
858 
859 	threads = ((struct pyrf_thread_map *)pthreads)->threads;
860 	cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
861 	evlist__init(&pevlist->evlist, cpus, threads);
862 	return 0;
863 }
864 
865 static void pyrf_evlist__delete(struct pyrf_evlist *pevlist)
866 {
867 	evlist__exit(&pevlist->evlist);
868 	Py_TYPE(pevlist)->tp_free((PyObject*)pevlist);
869 }
870 
871 static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist,
872 				   PyObject *args, PyObject *kwargs)
873 {
874 	struct evlist *evlist = &pevlist->evlist;
875 	static char *kwlist[] = { "pages", "overwrite", NULL };
876 	int pages = 128, overwrite = false;
877 
878 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", kwlist,
879 					 &pages, &overwrite))
880 		return NULL;
881 
882 	if (evlist__mmap(evlist, pages) < 0) {
883 		PyErr_SetFromErrno(PyExc_OSError);
884 		return NULL;
885 	}
886 
887 	Py_INCREF(Py_None);
888 	return Py_None;
889 }
890 
891 static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist,
892 				   PyObject *args, PyObject *kwargs)
893 {
894 	struct evlist *evlist = &pevlist->evlist;
895 	static char *kwlist[] = { "timeout", NULL };
896 	int timeout = -1, n;
897 
898 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout))
899 		return NULL;
900 
901 	n = evlist__poll(evlist, timeout);
902 	if (n < 0) {
903 		PyErr_SetFromErrno(PyExc_OSError);
904 		return NULL;
905 	}
906 
907 	return Py_BuildValue("i", n);
908 }
909 
910 static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
911 					 PyObject *args __maybe_unused,
912 					 PyObject *kwargs __maybe_unused)
913 {
914 	struct evlist *evlist = &pevlist->evlist;
915 	PyObject *list = PyList_New(0);
916 	int i;
917 
918 	for (i = 0; i < evlist->core.pollfd.nr; ++i) {
919 		PyObject *file;
920 #if PY_MAJOR_VERSION < 3
921 		FILE *fp = fdopen(evlist->core.pollfd.entries[i].fd, "r");
922 
923 		if (fp == NULL)
924 			goto free_list;
925 
926 		file = PyFile_FromFile(fp, "perf", "r", NULL);
927 #else
928 		file = PyFile_FromFd(evlist->core.pollfd.entries[i].fd, "perf", "r", -1,
929 				     NULL, NULL, NULL, 0);
930 #endif
931 		if (file == NULL)
932 			goto free_list;
933 
934 		if (PyList_Append(list, file) != 0) {
935 			Py_DECREF(file);
936 			goto free_list;
937 		}
938 
939 		Py_DECREF(file);
940 	}
941 
942 	return list;
943 free_list:
944 	return PyErr_NoMemory();
945 }
946 
947 
948 static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist,
949 				  PyObject *args,
950 				  PyObject *kwargs __maybe_unused)
951 {
952 	struct evlist *evlist = &pevlist->evlist;
953 	PyObject *pevsel;
954 	struct evsel *evsel;
955 
956 	if (!PyArg_ParseTuple(args, "O", &pevsel))
957 		return NULL;
958 
959 	Py_INCREF(pevsel);
960 	evsel = &((struct pyrf_evsel *)pevsel)->evsel;
961 	evsel->core.idx = evlist->core.nr_entries;
962 	evlist__add(evlist, evsel);
963 
964 	return Py_BuildValue("i", evlist->core.nr_entries);
965 }
966 
967 static struct mmap *get_md(struct evlist *evlist, int cpu)
968 {
969 	int i;
970 
971 	for (i = 0; i < evlist->core.nr_mmaps; i++) {
972 		struct mmap *md = &evlist->mmap[i];
973 
974 		if (md->core.cpu.cpu == cpu)
975 			return md;
976 	}
977 
978 	return NULL;
979 }
980 
981 static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
982 					  PyObject *args, PyObject *kwargs)
983 {
984 	struct evlist *evlist = &pevlist->evlist;
985 	union perf_event *event;
986 	int sample_id_all = 1, cpu;
987 	static char *kwlist[] = { "cpu", "sample_id_all", NULL };
988 	struct mmap *md;
989 	int err;
990 
991 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist,
992 					 &cpu, &sample_id_all))
993 		return NULL;
994 
995 	md = get_md(evlist, cpu);
996 	if (!md)
997 		return NULL;
998 
999 	if (perf_mmap__read_init(&md->core) < 0)
1000 		goto end;
1001 
1002 	event = perf_mmap__read_event(&md->core);
1003 	if (event != NULL) {
1004 		PyObject *pyevent = pyrf_event__new(event);
1005 		struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
1006 		struct evsel *evsel;
1007 
1008 		if (pyevent == NULL)
1009 			return PyErr_NoMemory();
1010 
1011 		evsel = evlist__event2evsel(evlist, event);
1012 		if (!evsel) {
1013 			Py_INCREF(Py_None);
1014 			return Py_None;
1015 		}
1016 
1017 		pevent->evsel = evsel;
1018 
1019 		err = evsel__parse_sample(evsel, event, &pevent->sample);
1020 
1021 		/* Consume the event only after we parsed it out. */
1022 		perf_mmap__consume(&md->core);
1023 
1024 		if (err)
1025 			return PyErr_Format(PyExc_OSError,
1026 					    "perf: can't parse sample, err=%d", err);
1027 		return pyevent;
1028 	}
1029 end:
1030 	Py_INCREF(Py_None);
1031 	return Py_None;
1032 }
1033 
1034 static PyObject *pyrf_evlist__open(struct pyrf_evlist *pevlist,
1035 				   PyObject *args, PyObject *kwargs)
1036 {
1037 	struct evlist *evlist = &pevlist->evlist;
1038 
1039 	if (evlist__open(evlist) < 0) {
1040 		PyErr_SetFromErrno(PyExc_OSError);
1041 		return NULL;
1042 	}
1043 
1044 	Py_INCREF(Py_None);
1045 	return Py_None;
1046 }
1047 
1048 static PyMethodDef pyrf_evlist__methods[] = {
1049 	{
1050 		.ml_name  = "mmap",
1051 		.ml_meth  = (PyCFunction)pyrf_evlist__mmap,
1052 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
1053 		.ml_doc	  = PyDoc_STR("mmap the file descriptor table.")
1054 	},
1055 	{
1056 		.ml_name  = "open",
1057 		.ml_meth  = (PyCFunction)pyrf_evlist__open,
1058 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
1059 		.ml_doc	  = PyDoc_STR("open the file descriptors.")
1060 	},
1061 	{
1062 		.ml_name  = "poll",
1063 		.ml_meth  = (PyCFunction)pyrf_evlist__poll,
1064 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
1065 		.ml_doc	  = PyDoc_STR("poll the file descriptor table.")
1066 	},
1067 	{
1068 		.ml_name  = "get_pollfd",
1069 		.ml_meth  = (PyCFunction)pyrf_evlist__get_pollfd,
1070 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
1071 		.ml_doc	  = PyDoc_STR("get the poll file descriptor table.")
1072 	},
1073 	{
1074 		.ml_name  = "add",
1075 		.ml_meth  = (PyCFunction)pyrf_evlist__add,
1076 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
1077 		.ml_doc	  = PyDoc_STR("adds an event selector to the list.")
1078 	},
1079 	{
1080 		.ml_name  = "read_on_cpu",
1081 		.ml_meth  = (PyCFunction)pyrf_evlist__read_on_cpu,
1082 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
1083 		.ml_doc	  = PyDoc_STR("reads an event.")
1084 	},
1085 	{ .ml_name = NULL, }
1086 };
1087 
1088 static Py_ssize_t pyrf_evlist__length(PyObject *obj)
1089 {
1090 	struct pyrf_evlist *pevlist = (void *)obj;
1091 
1092 	return pevlist->evlist.core.nr_entries;
1093 }
1094 
1095 static PyObject *pyrf_evlist__item(PyObject *obj, Py_ssize_t i)
1096 {
1097 	struct pyrf_evlist *pevlist = (void *)obj;
1098 	struct evsel *pos;
1099 
1100 	if (i >= pevlist->evlist.core.nr_entries)
1101 		return NULL;
1102 
1103 	evlist__for_each_entry(&pevlist->evlist, pos) {
1104 		if (i-- == 0)
1105 			break;
1106 	}
1107 
1108 	return Py_BuildValue("O", container_of(pos, struct pyrf_evsel, evsel));
1109 }
1110 
1111 static PySequenceMethods pyrf_evlist__sequence_methods = {
1112 	.sq_length = pyrf_evlist__length,
1113 	.sq_item   = pyrf_evlist__item,
1114 };
1115 
1116 static char pyrf_evlist__doc[] = PyDoc_STR("perf event selector list object.");
1117 
1118 static PyTypeObject pyrf_evlist__type = {
1119 	PyVarObject_HEAD_INIT(NULL, 0)
1120 	.tp_name	= "perf.evlist",
1121 	.tp_basicsize	= sizeof(struct pyrf_evlist),
1122 	.tp_dealloc	= (destructor)pyrf_evlist__delete,
1123 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
1124 	.tp_as_sequence	= &pyrf_evlist__sequence_methods,
1125 	.tp_doc		= pyrf_evlist__doc,
1126 	.tp_methods	= pyrf_evlist__methods,
1127 	.tp_init	= (initproc)pyrf_evlist__init,
1128 };
1129 
1130 static int pyrf_evlist__setup_types(void)
1131 {
1132 	pyrf_evlist__type.tp_new = PyType_GenericNew;
1133 	return PyType_Ready(&pyrf_evlist__type);
1134 }
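
/*
 * Usage sketch (not part of the build), condensed from
 * tools/perf/python/twatch.py: build the maps, add an opened evsel, then
 * mmap/poll and pull events per cpu.
 *
 *   import perf
 *   cpus, threads = perf.cpu_map(), perf.thread_map()
 *   evsel = perf.evsel(type=perf.TYPE_SOFTWARE, config=perf.COUNT_SW_DUMMY,
 *                      task=1, comm=1, freq=0, wakeup_events=1, watermark=1,
 *                      sample_id_all=1,
 *                      sample_type=perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
 *   evsel.open(cpus=cpus, threads=threads)
 *   evlist = perf.evlist(cpus, threads)
 *   evlist.add(evsel)
 *   evlist.mmap()
 *   while True:
 *       evlist.poll(timeout=-1)
 *       for cpu in cpus:
 *           event = evlist.read_on_cpu(cpu)
 *           if event:
 *               print(event)
 */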
1135 
1136 #define PERF_CONST(name) { #name, PERF_##name }
1137 
1138 static struct {
1139 	const char *name;
1140 	int	    value;
1141 } perf__constants[] = {
1142 	PERF_CONST(TYPE_HARDWARE),
1143 	PERF_CONST(TYPE_SOFTWARE),
1144 	PERF_CONST(TYPE_TRACEPOINT),
1145 	PERF_CONST(TYPE_HW_CACHE),
1146 	PERF_CONST(TYPE_RAW),
1147 	PERF_CONST(TYPE_BREAKPOINT),
1148 
1149 	PERF_CONST(COUNT_HW_CPU_CYCLES),
1150 	PERF_CONST(COUNT_HW_INSTRUCTIONS),
1151 	PERF_CONST(COUNT_HW_CACHE_REFERENCES),
1152 	PERF_CONST(COUNT_HW_CACHE_MISSES),
1153 	PERF_CONST(COUNT_HW_BRANCH_INSTRUCTIONS),
1154 	PERF_CONST(COUNT_HW_BRANCH_MISSES),
1155 	PERF_CONST(COUNT_HW_BUS_CYCLES),
1156 	PERF_CONST(COUNT_HW_CACHE_L1D),
1157 	PERF_CONST(COUNT_HW_CACHE_L1I),
1158 	PERF_CONST(COUNT_HW_CACHE_LL),
1159 	PERF_CONST(COUNT_HW_CACHE_DTLB),
1160 	PERF_CONST(COUNT_HW_CACHE_ITLB),
1161 	PERF_CONST(COUNT_HW_CACHE_BPU),
1162 	PERF_CONST(COUNT_HW_CACHE_OP_READ),
1163 	PERF_CONST(COUNT_HW_CACHE_OP_WRITE),
1164 	PERF_CONST(COUNT_HW_CACHE_OP_PREFETCH),
1165 	PERF_CONST(COUNT_HW_CACHE_RESULT_ACCESS),
1166 	PERF_CONST(COUNT_HW_CACHE_RESULT_MISS),
1167 
1168 	PERF_CONST(COUNT_HW_STALLED_CYCLES_FRONTEND),
1169 	PERF_CONST(COUNT_HW_STALLED_CYCLES_BACKEND),
1170 
1171 	PERF_CONST(COUNT_SW_CPU_CLOCK),
1172 	PERF_CONST(COUNT_SW_TASK_CLOCK),
1173 	PERF_CONST(COUNT_SW_PAGE_FAULTS),
1174 	PERF_CONST(COUNT_SW_CONTEXT_SWITCHES),
1175 	PERF_CONST(COUNT_SW_CPU_MIGRATIONS),
1176 	PERF_CONST(COUNT_SW_PAGE_FAULTS_MIN),
1177 	PERF_CONST(COUNT_SW_PAGE_FAULTS_MAJ),
1178 	PERF_CONST(COUNT_SW_ALIGNMENT_FAULTS),
1179 	PERF_CONST(COUNT_SW_EMULATION_FAULTS),
1180 	PERF_CONST(COUNT_SW_DUMMY),
1181 
1182 	PERF_CONST(SAMPLE_IP),
1183 	PERF_CONST(SAMPLE_TID),
1184 	PERF_CONST(SAMPLE_TIME),
1185 	PERF_CONST(SAMPLE_ADDR),
1186 	PERF_CONST(SAMPLE_READ),
1187 	PERF_CONST(SAMPLE_CALLCHAIN),
1188 	PERF_CONST(SAMPLE_ID),
1189 	PERF_CONST(SAMPLE_CPU),
1190 	PERF_CONST(SAMPLE_PERIOD),
1191 	PERF_CONST(SAMPLE_STREAM_ID),
1192 	PERF_CONST(SAMPLE_RAW),
1193 
1194 	PERF_CONST(FORMAT_TOTAL_TIME_ENABLED),
1195 	PERF_CONST(FORMAT_TOTAL_TIME_RUNNING),
1196 	PERF_CONST(FORMAT_ID),
1197 	PERF_CONST(FORMAT_GROUP),
1198 
1199 	PERF_CONST(RECORD_MMAP),
1200 	PERF_CONST(RECORD_LOST),
1201 	PERF_CONST(RECORD_COMM),
1202 	PERF_CONST(RECORD_EXIT),
1203 	PERF_CONST(RECORD_THROTTLE),
1204 	PERF_CONST(RECORD_UNTHROTTLE),
1205 	PERF_CONST(RECORD_FORK),
1206 	PERF_CONST(RECORD_READ),
1207 	PERF_CONST(RECORD_SAMPLE),
1208 	PERF_CONST(RECORD_MMAP2),
1209 	PERF_CONST(RECORD_AUX),
1210 	PERF_CONST(RECORD_ITRACE_START),
1211 	PERF_CONST(RECORD_LOST_SAMPLES),
1212 	PERF_CONST(RECORD_SWITCH),
1213 	PERF_CONST(RECORD_SWITCH_CPU_WIDE),
1214 
1215 	PERF_CONST(RECORD_MISC_SWITCH_OUT),
1216 	{ .name = NULL, },
1217 };
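
/*
 * PyInit_perf()/initperf() below exports each entry above as a module-level
 * attribute, so scripts can pass e.g. perf.TYPE_SOFTWARE to perf.evsel() or
 * compare an event's 'type' member with perf.RECORD_SAMPLE.
 */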
1218 
1219 static PyObject *pyrf__tracepoint(struct pyrf_evsel *pevsel,
1220 				  PyObject *args, PyObject *kwargs)
1221 {
1222 #ifndef HAVE_LIBTRACEEVENT
1223 	return NULL;
1224 #else
1225 	struct tep_event *tp_format;
1226 	static char *kwlist[] = { "sys", "name", NULL };
1227 	char *sys  = NULL;
1228 	char *name = NULL;
1229 
1230 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ss", kwlist,
1231 					 &sys, &name))
1232 		return NULL;
1233 
1234 	tp_format = trace_event__tp_format(sys, name);
1235 	if (IS_ERR(tp_format))
1236 		return _PyLong_FromLong(-1);
1237 
1238 	return _PyLong_FromLong(tp_format->id);
1239 #endif // HAVE_LIBTRACEEVENT
1240 }
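
/*
 * Usage sketch (not part of the build, needs HAVE_LIBTRACEEVENT), along the
 * lines of tools/perf/python/tracepoint.py: the returned id is meant to be
 * used as the config of a PERF_TYPE_TRACEPOINT evsel.
 *
 *   import perf
 *   cfg = perf.tracepoint(sys="sched", name="sched_switch")
 *   ev = perf.evsel(type=perf.TYPE_TRACEPOINT, config=cfg,
 *                   freq=0, sample_period=1, wakeup_events=1,
 *                   sample_type=perf.SAMPLE_RAW | perf.SAMPLE_TIME | perf.SAMPLE_CPU)
 */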
1241 
1242 static PyMethodDef perf__methods[] = {
1243 	{
1244 		.ml_name  = "tracepoint",
1245 		.ml_meth  = (PyCFunction) pyrf__tracepoint,
1246 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
1247 		.ml_doc	  = PyDoc_STR("Get tracepoint config.")
1248 	},
1249 	{ .ml_name = NULL, }
1250 };
1251 
1252 #if PY_MAJOR_VERSION < 3
1253 PyMODINIT_FUNC initperf(void)
1254 #else
1255 PyMODINIT_FUNC PyInit_perf(void)
1256 #endif
1257 {
1258 	PyObject *obj;
1259 	int i;
1260 	PyObject *dict;
1261 #if PY_MAJOR_VERSION < 3
1262 	PyObject *module = Py_InitModule("perf", perf__methods);
1263 #else
1264 	static struct PyModuleDef moduledef = {
1265 		PyModuleDef_HEAD_INIT,
1266 		"perf",			/* m_name */
1267 		"",			/* m_doc */
1268 		-1,			/* m_size */
1269 		perf__methods,		/* m_methods */
1270 		NULL,			/* m_reload */
1271 		NULL,			/* m_traverse */
1272 		NULL,			/* m_clear */
1273 		NULL,			/* m_free */
1274 	};
1275 	PyObject *module = PyModule_Create(&moduledef);
1276 #endif
1277 
1278 	if (module == NULL ||
1279 	    pyrf_event__setup_types() < 0 ||
1280 	    pyrf_evlist__setup_types() < 0 ||
1281 	    pyrf_evsel__setup_types() < 0 ||
1282 	    pyrf_thread_map__setup_types() < 0 ||
1283 	    pyrf_cpu_map__setup_types() < 0)
1284 #if PY_MAJOR_VERSION < 3
1285 		return;
1286 #else
1287 		return module;
1288 #endif
1289 
1290 	/* The page_size global lives in util and is needed by the mmap code. */
1291 	page_size = sysconf(_SC_PAGE_SIZE);
1292 
1293 	Py_INCREF(&pyrf_evlist__type);
1294 	PyModule_AddObject(module, "evlist", (PyObject*)&pyrf_evlist__type);
1295 
1296 	Py_INCREF(&pyrf_evsel__type);
1297 	PyModule_AddObject(module, "evsel", (PyObject*)&pyrf_evsel__type);
1298 
1299 	Py_INCREF(&pyrf_mmap_event__type);
1300 	PyModule_AddObject(module, "mmap_event", (PyObject *)&pyrf_mmap_event__type);
1301 
1302 	Py_INCREF(&pyrf_lost_event__type);
1303 	PyModule_AddObject(module, "lost_event", (PyObject *)&pyrf_lost_event__type);
1304 
1305 	Py_INCREF(&pyrf_comm_event__type);
1306 	PyModule_AddObject(module, "comm_event", (PyObject *)&pyrf_comm_event__type);
1307 
1308 	Py_INCREF(&pyrf_task_event__type);
1309 	PyModule_AddObject(module, "task_event", (PyObject *)&pyrf_task_event__type);
1310 
1311 	Py_INCREF(&pyrf_throttle_event__type);
1312 	PyModule_AddObject(module, "throttle_event", (PyObject *)&pyrf_throttle_event__type);
1313 
1317 	Py_INCREF(&pyrf_read_event__type);
1318 	PyModule_AddObject(module, "read_event", (PyObject *)&pyrf_read_event__type);
1319 
1320 	Py_INCREF(&pyrf_sample_event__type);
1321 	PyModule_AddObject(module, "sample_event", (PyObject *)&pyrf_sample_event__type);
1322 
1323 	Py_INCREF(&pyrf_context_switch_event__type);
1324 	PyModule_AddObject(module, "switch_event", (PyObject *)&pyrf_context_switch_event__type);
1325 
1326 	Py_INCREF(&pyrf_thread_map__type);
1327 	PyModule_AddObject(module, "thread_map", (PyObject*)&pyrf_thread_map__type);
1328 
1329 	Py_INCREF(&pyrf_cpu_map__type);
1330 	PyModule_AddObject(module, "cpu_map", (PyObject*)&pyrf_cpu_map__type);
1331 
1332 	dict = PyModule_GetDict(module);
1333 	if (dict == NULL)
1334 		goto error;
1335 
1336 	for (i = 0; perf__constants[i].name != NULL; i++) {
1337 		obj = _PyLong_FromLong(perf__constants[i].value);
1338 		if (obj == NULL)
1339 			goto error;
1340 		PyDict_SetItemString(dict, perf__constants[i].name, obj);
1341 		Py_DECREF(obj);
1342 	}
1343 
1344 error:
1345 	if (PyErr_Occurred())
1346 		PyErr_SetString(PyExc_ImportError, "perf: Init failed!");
1347 #if PY_MAJOR_VERSION >= 3
1348 	return module;
1349 #endif
1350 }
1351 
1352 
1353 /* The following are stubs to avoid dragging in builtin-* objects. */
1354 /* TODO: move the code out of the builtin-* file into util. */
1355 
1356 unsigned int scripting_max_stack = PERF_MAX_STACK_DEPTH;
1357 
1358 bool kvm_entry_event(struct evsel *evsel __maybe_unused)
1359 {
1360 	return false;
1361 }
1362 
1363 bool kvm_exit_event(struct evsel *evsel __maybe_unused)
1364 {
1365 	return false;
1366 }
1367 
1368 bool exit_event_begin(struct evsel *evsel __maybe_unused,
1369 		      struct perf_sample *sample  __maybe_unused,
1370 		      struct event_key *key  __maybe_unused)
1371 {
1372 	return false;
1373 }
1374 
1375 bool exit_event_end(struct evsel *evsel __maybe_unused,
1376 		    struct perf_sample *sample __maybe_unused,
1377 		    struct event_key *key __maybe_unused)
1378 {
1379 	return false;
1380 }
1381 
1382 void exit_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
1383 			   struct event_key *key __maybe_unused,
1384 			   char *decode __maybe_unused)
1385 {
1386 }
1387 
1388 int find_scripts(char **scripts_array  __maybe_unused, char **scripts_path_array  __maybe_unused,
1389 		int num  __maybe_unused, int pathlen __maybe_unused)
1390 {
1391 	return -1;
1392 }
1393 
1394 void perf_stat__set_no_csv_summary(int set __maybe_unused)
1395 {
1396 }
1397 
1398 void perf_stat__set_big_num(int set __maybe_unused)
1399 {
1400 }
1401 
1402 int script_spec_register(const char *spec __maybe_unused, struct scripting_ops *ops __maybe_unused)
1403 {
1404 	return -1;
1405 }
1406 
1407 arch_syscalls__strerrno_t *arch_syscalls__strerrno_function(const char *arch __maybe_unused)
1408 {
1409 	return NULL;
1410 }
1411 
1412 struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork __maybe_unused,
1413 				       struct kwork_class *class __maybe_unused,
1414 				       struct kwork_work *key  __maybe_unused)
1415 {
1416 	return NULL;
1417 }
1418 
1419 void script_fetch_insn(struct perf_sample *sample __maybe_unused,
1420 		struct thread *thread __maybe_unused,
1421 		struct machine *machine __maybe_unused)
1422 {
1423 }
1424 
1425 int perf_sample__sprintf_flags(u32 flags __maybe_unused, char *str __maybe_unused,
1426 			size_t sz __maybe_unused)
1427 {
1428 	return -1;
1429 }
1430 
1431 bool match_callstack_filter(struct machine *machine __maybe_unused, u64 *callstack __maybe_unused)
1432 {
1433 	return false;
1434 }
1435 
1436 struct lock_stat *lock_stat_find(u64 addr __maybe_unused)
1437 {
1438 	return NULL;
1439 }
1440 
1441 struct lock_stat *lock_stat_findnew(u64 addr __maybe_unused, const char *name __maybe_unused,
1442 				int flags __maybe_unused)
1443 {
1444 	return NULL;
1445 }
1446 
1447 int cmd_inject(int argc __maybe_unused, const char *argv[] __maybe_unused)
1448 {
1449 	return -1;
1450 }
1451