xref: /linux/tools/perf/util/python.c (revision f4f346c3465949ebba80c6cc52cd8d2eeaa545fd)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <Python.h>
3 #include <structmember.h>
4 #include <inttypes.h>
5 #include <poll.h>
6 #include <linux/err.h>
7 #include <perf/cpumap.h>
8 #ifdef HAVE_LIBTRACEEVENT
9 #include <event-parse.h>
10 #endif
11 #include <perf/mmap.h>
12 #include "callchain.h"
13 #include "counts.h"
14 #include "evlist.h"
15 #include "evsel.h"
16 #include "event.h"
17 #include "print_binary.h"
18 #include "record.h"
19 #include "strbuf.h"
20 #include "thread_map.h"
21 #include "trace-event.h"
22 #include "metricgroup.h"
23 #include "mmap.h"
24 #include "util/sample.h"
25 #include <internal/lib.h>
26 
27 PyMODINIT_FUNC PyInit_perf(void);
28 
29 #define member_def(type, member, ptype, help) \
30 	{ #member, ptype, \
31 	  offsetof(struct pyrf_event, event) + offsetof(struct type, member), \
32 	  0, help }
33 
34 #define sample_member_def(name, member, ptype, help) \
35 	{ #name, ptype, \
36 	  offsetof(struct pyrf_event, sample) + offsetof(struct perf_sample, member), \
37 	  0, help }
38 
39 struct pyrf_event {
40 	PyObject_HEAD
41 	struct evsel *evsel;
42 	struct perf_sample sample;
43 	union perf_event   event;
44 };
45 
46 #define sample_members \
47 	sample_member_def(sample_ip, ip, T_ULONGLONG, "event ip"),			 \
48 	sample_member_def(sample_pid, pid, T_INT, "event pid"),			 \
49 	sample_member_def(sample_tid, tid, T_INT, "event tid"),			 \
50 	sample_member_def(sample_time, time, T_ULONGLONG, "event timestamp"),		 \
51 	sample_member_def(sample_addr, addr, T_ULONGLONG, "event addr"),		 \
52 	sample_member_def(sample_id, id, T_ULONGLONG, "event id"),			 \
53 	sample_member_def(sample_stream_id, stream_id, T_ULONGLONG, "event stream id"), \
54 	sample_member_def(sample_period, period, T_ULONGLONG, "event period"),		 \
55 	sample_member_def(sample_cpu, cpu, T_UINT, "event cpu"),
56 
57 static const char pyrf_mmap_event__doc[] = PyDoc_STR("perf mmap event object.");
58 
59 static PyMemberDef pyrf_mmap_event__members[] = {
60 	sample_members
61 	member_def(perf_event_header, type, T_UINT, "event type"),
62 	member_def(perf_event_header, misc, T_UINT, "event misc"),
63 	member_def(perf_record_mmap, pid, T_UINT, "event pid"),
64 	member_def(perf_record_mmap, tid, T_UINT, "event tid"),
65 	member_def(perf_record_mmap, start, T_ULONGLONG, "start of the map"),
66 	member_def(perf_record_mmap, len, T_ULONGLONG, "map length"),
67 	member_def(perf_record_mmap, pgoff, T_ULONGLONG, "page offset"),
68 	member_def(perf_record_mmap, filename, T_STRING_INPLACE, "backing store"),
69 	{ .name = NULL, },
70 };
71 
72 static PyObject *pyrf_mmap_event__repr(const struct pyrf_event *pevent)
73 {
74 	PyObject *ret;
75 	char *s;
76 
77 	if (asprintf(&s, "{ type: mmap, pid: %u, tid: %u, start: %#" PRI_lx64 ", "
78 			 "length: %#" PRI_lx64 ", offset: %#" PRI_lx64 ", "
79 			 "filename: %s }",
80 		     pevent->event.mmap.pid, pevent->event.mmap.tid,
81 		     pevent->event.mmap.start, pevent->event.mmap.len,
82 		     pevent->event.mmap.pgoff, pevent->event.mmap.filename) < 0) {
83 		ret = PyErr_NoMemory();
84 	} else {
85 		ret = PyUnicode_FromString(s);
86 		free(s);
87 	}
88 	return ret;
89 }
90 
91 static PyTypeObject pyrf_mmap_event__type = {
92 	PyVarObject_HEAD_INIT(NULL, 0)
93 	.tp_name	= "perf.mmap_event",
94 	.tp_basicsize	= sizeof(struct pyrf_event),
95 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
96 	.tp_doc		= pyrf_mmap_event__doc,
97 	.tp_members	= pyrf_mmap_event__members,
98 	.tp_repr	= (reprfunc)pyrf_mmap_event__repr,
99 };
100 
101 static const char pyrf_task_event__doc[] = PyDoc_STR("perf task (fork/exit) event object.");
102 
103 static PyMemberDef pyrf_task_event__members[] = {
104 	sample_members
105 	member_def(perf_event_header, type, T_UINT, "event type"),
106 	member_def(perf_record_fork, pid, T_UINT, "event pid"),
107 	member_def(perf_record_fork, ppid, T_UINT, "event ppid"),
108 	member_def(perf_record_fork, tid, T_UINT, "event tid"),
109 	member_def(perf_record_fork, ptid, T_UINT, "event ptid"),
110 	member_def(perf_record_fork, time, T_ULONGLONG, "timestamp"),
111 	{ .name = NULL, },
112 };
113 
114 static PyObject *pyrf_task_event__repr(const struct pyrf_event *pevent)
115 {
116 	return PyUnicode_FromFormat("{ type: %s, pid: %u, ppid: %u, tid: %u, "
117 				   "ptid: %u, time: %" PRI_lu64 "}",
118 				   pevent->event.header.type == PERF_RECORD_FORK ? "fork" : "exit",
119 				   pevent->event.fork.pid,
120 				   pevent->event.fork.ppid,
121 				   pevent->event.fork.tid,
122 				   pevent->event.fork.ptid,
123 				   pevent->event.fork.time);
124 }
125 
126 static PyTypeObject pyrf_task_event__type = {
127 	PyVarObject_HEAD_INIT(NULL, 0)
128 	.tp_name	= "perf.task_event",
129 	.tp_basicsize	= sizeof(struct pyrf_event),
130 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
131 	.tp_doc		= pyrf_task_event__doc,
132 	.tp_members	= pyrf_task_event__members,
133 	.tp_repr	= (reprfunc)pyrf_task_event__repr,
134 };
135 
136 static const char pyrf_comm_event__doc[] = PyDoc_STR("perf comm event object.");
137 
138 static PyMemberDef pyrf_comm_event__members[] = {
139 	sample_members
140 	member_def(perf_event_header, type, T_UINT, "event type"),
141 	member_def(perf_record_comm, pid, T_UINT, "event pid"),
142 	member_def(perf_record_comm, tid, T_UINT, "event tid"),
143 	member_def(perf_record_comm, comm, T_STRING_INPLACE, "process name"),
144 	{ .name = NULL, },
145 };
146 
147 static PyObject *pyrf_comm_event__repr(const struct pyrf_event *pevent)
148 {
149 	return PyUnicode_FromFormat("{ type: comm, pid: %u, tid: %u, comm: %s }",
150 				   pevent->event.comm.pid,
151 				   pevent->event.comm.tid,
152 				   pevent->event.comm.comm);
153 }
154 
155 static PyTypeObject pyrf_comm_event__type = {
156 	PyVarObject_HEAD_INIT(NULL, 0)
157 	.tp_name	= "perf.comm_event",
158 	.tp_basicsize	= sizeof(struct pyrf_event),
159 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
160 	.tp_doc		= pyrf_comm_event__doc,
161 	.tp_members	= pyrf_comm_event__members,
162 	.tp_repr	= (reprfunc)pyrf_comm_event__repr,
163 };
164 
165 static const char pyrf_throttle_event__doc[] = PyDoc_STR("perf throttle event object.");
166 
167 static PyMemberDef pyrf_throttle_event__members[] = {
168 	sample_members
169 	member_def(perf_event_header, type, T_UINT, "event type"),
170 	member_def(perf_record_throttle, time, T_ULONGLONG, "timestamp"),
171 	member_def(perf_record_throttle, id, T_ULONGLONG, "event id"),
172 	member_def(perf_record_throttle, stream_id, T_ULONGLONG, "event stream id"),
173 	{ .name = NULL, },
174 };
175 
176 static PyObject *pyrf_throttle_event__repr(const struct pyrf_event *pevent)
177 {
178 	const struct perf_record_throttle *te = (const struct perf_record_throttle *)
179 		(&pevent->event.header + 1);
180 
181 	return PyUnicode_FromFormat("{ type: %sthrottle, time: %" PRI_lu64 ", id: %" PRI_lu64
182 				   ", stream_id: %" PRI_lu64 " }",
183 				   pevent->event.header.type == PERF_RECORD_THROTTLE ? "" : "un",
184 				   te->time, te->id, te->stream_id);
185 }
186 
187 static PyTypeObject pyrf_throttle_event__type = {
188 	PyVarObject_HEAD_INIT(NULL, 0)
189 	.tp_name	= "perf.throttle_event",
190 	.tp_basicsize	= sizeof(struct pyrf_event),
191 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
192 	.tp_doc		= pyrf_throttle_event__doc,
193 	.tp_members	= pyrf_throttle_event__members,
194 	.tp_repr	= (reprfunc)pyrf_throttle_event__repr,
195 };
196 
197 static const char pyrf_lost_event__doc[] = PyDoc_STR("perf lost event object.");
198 
199 static PyMemberDef pyrf_lost_event__members[] = {
200 	sample_members
201 	member_def(perf_record_lost, id, T_ULONGLONG, "event id"),
202 	member_def(perf_record_lost, lost, T_ULONGLONG, "number of lost events"),
203 	{ .name = NULL, },
204 };
205 
206 static PyObject *pyrf_lost_event__repr(const struct pyrf_event *pevent)
207 {
208 	PyObject *ret;
209 	char *s;
210 
211 	if (asprintf(&s, "{ type: lost, id: %#" PRI_lx64 ", "
212 			 "lost: %#" PRI_lx64 " }",
213 		     pevent->event.lost.id, pevent->event.lost.lost) < 0) {
214 		ret = PyErr_NoMemory();
215 	} else {
216 		ret = PyUnicode_FromString(s);
217 		free(s);
218 	}
219 	return ret;
220 }
221 
222 static PyTypeObject pyrf_lost_event__type = {
223 	PyVarObject_HEAD_INIT(NULL, 0)
224 	.tp_name	= "perf.lost_event",
225 	.tp_basicsize	= sizeof(struct pyrf_event),
226 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
227 	.tp_doc		= pyrf_lost_event__doc,
228 	.tp_members	= pyrf_lost_event__members,
229 	.tp_repr	= (reprfunc)pyrf_lost_event__repr,
230 };
231 
232 static const char pyrf_read_event__doc[] = PyDoc_STR("perf read event object.");
233 
234 static PyMemberDef pyrf_read_event__members[] = {
235 	sample_members
236 	member_def(perf_record_read, pid, T_UINT, "event pid"),
237 	member_def(perf_record_read, tid, T_UINT, "event tid"),
238 	{ .name = NULL, },
239 };
240 
241 static PyObject *pyrf_read_event__repr(const struct pyrf_event *pevent)
242 {
243 	return PyUnicode_FromFormat("{ type: read, pid: %u, tid: %u }",
244 				   pevent->event.read.pid,
245 				   pevent->event.read.tid);
246 	/*
247 	 * FIXME: return the array of read values,
248 	 * making this method useful ;-)
249 	 */
250 }
251 
252 static PyTypeObject pyrf_read_event__type = {
253 	PyVarObject_HEAD_INIT(NULL, 0)
254 	.tp_name	= "perf.read_event",
255 	.tp_basicsize	= sizeof(struct pyrf_event),
256 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
257 	.tp_doc		= pyrf_read_event__doc,
258 	.tp_members	= pyrf_read_event__members,
259 	.tp_repr	= (reprfunc)pyrf_read_event__repr,
260 };
261 
262 static const char pyrf_sample_event__doc[] = PyDoc_STR("perf sample event object.");
263 
264 static PyMemberDef pyrf_sample_event__members[] = {
265 	sample_members
266 	member_def(perf_event_header, type, T_UINT, "event type"),
267 	{ .name = NULL, },
268 };
269 
270 static void pyrf_sample_event__delete(struct pyrf_event *pevent)
271 {
272 	perf_sample__exit(&pevent->sample);
273 	Py_TYPE(pevent)->tp_free((PyObject*)pevent);
274 }
275 
276 static PyObject *pyrf_sample_event__repr(const struct pyrf_event *pevent)
277 {
278 	PyObject *ret;
279 	char *s;
280 
281 	if (asprintf(&s, "{ type: sample }") < 0) {
282 		ret = PyErr_NoMemory();
283 	} else {
284 		ret = PyUnicode_FromString(s);
285 		free(s);
286 	}
287 	return ret;
288 }
289 
290 #ifdef HAVE_LIBTRACEEVENT
291 static bool is_tracepoint(const struct pyrf_event *pevent)
292 {
293 	return pevent->evsel->core.attr.type == PERF_TYPE_TRACEPOINT;
294 }
295 
296 static PyObject*
297 tracepoint_field(const struct pyrf_event *pe, struct tep_format_field *field)
298 {
299 	struct tep_handle *pevent = field->event->tep;
300 	void *data = pe->sample.raw_data;
301 	PyObject *ret = NULL;
302 	unsigned long long val;
303 	unsigned int offset, len;
304 
305 	if (field->flags & TEP_FIELD_IS_ARRAY) {
306 		offset = field->offset;
307 		len    = field->size;
308 		if (field->flags & TEP_FIELD_IS_DYNAMIC) {
309 			val     = tep_read_number(pevent, data + offset, len);
310 			offset  = val;
311 			len     = offset >> 16;
312 			offset &= 0xffff;
313 			if (tep_field_is_relative(field->flags))
314 				offset += field->offset + field->size;
315 		}
316 		if (field->flags & TEP_FIELD_IS_STRING &&
317 		    is_printable_array(data + offset, len)) {
318 			ret = PyUnicode_FromString((char *)data + offset);
319 		} else {
320 			ret = PyByteArray_FromStringAndSize((const char *) data + offset, len);
321 			field->flags &= ~TEP_FIELD_IS_STRING;
322 		}
323 	} else {
324 		val = tep_read_number(pevent, data + field->offset,
325 				      field->size);
326 		if (field->flags & TEP_FIELD_IS_POINTER)
327 			ret = PyLong_FromUnsignedLong((unsigned long) val);
328 		else if (field->flags & TEP_FIELD_IS_SIGNED)
329 			ret = PyLong_FromLong((long) val);
330 		else
331 			ret = PyLong_FromUnsignedLong((unsigned long) val);
332 	}
333 
334 	return ret;
335 }
336 
337 static PyObject*
338 get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name)
339 {
340 	struct evsel *evsel = pevent->evsel;
341 	struct tep_event *tp_format = evsel__tp_format(evsel);
342 	struct tep_format_field *field;
343 
344 	if (IS_ERR_OR_NULL(tp_format))
345 		return NULL;
346 
347 	PyObject *obj = PyObject_Str(attr_name);
348 	if (obj == NULL)
349 		return NULL;
350 
351 	const char *str = PyUnicode_AsUTF8(obj);
352 	if (str == NULL) {
353 		Py_DECREF(obj);
354 		return NULL;
355 	}
356 
357 	field = tep_find_any_field(tp_format, str);
358 	Py_DECREF(obj);
359 	return field ? tracepoint_field(pevent, field) : NULL;
360 }
361 #endif /* HAVE_LIBTRACEEVENT */
362 
363 static PyObject*
364 pyrf_sample_event__getattro(struct pyrf_event *pevent, PyObject *attr_name)
365 {
366 	PyObject *obj = NULL;
367 
368 #ifdef HAVE_LIBTRACEEVENT
369 	if (is_tracepoint(pevent))
370 		obj = get_tracepoint_field(pevent, attr_name);
371 #endif
372 
373 	return obj ?: PyObject_GenericGetAttr((PyObject *) pevent, attr_name);
374 }
375 
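/*
 * Editor's note: for tracepoint evsels, the getattro hook above lets Python
 * code read raw tracepoint fields as plain attributes of a sample event.
 * A minimal sketch; the event variable and field names (common_pid,
 * prev_comm) are illustrative assumptions that depend on the tracepoint
 * actually opened, they are not defined in this file:
 *
 *	event = evlist.read_on_cpu(cpu)
 *	if isinstance(event, perf.sample_event):
 *		print(event.sample_pid, event.common_pid, event.prev_comm)
 */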
376 static PyTypeObject pyrf_sample_event__type = {
377 	PyVarObject_HEAD_INIT(NULL, 0)
378 	.tp_name	= "perf.sample_event",
379 	.tp_basicsize	= sizeof(struct pyrf_event),
380 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
381 	.tp_doc		= pyrf_sample_event__doc,
382 	.tp_members	= pyrf_sample_event__members,
383 	.tp_repr	= (reprfunc)pyrf_sample_event__repr,
384 	.tp_getattro	= (getattrofunc) pyrf_sample_event__getattro,
385 };
386 
387 static const char pyrf_context_switch_event__doc[] = PyDoc_STR("perf context_switch event object.");
388 
389 static PyMemberDef pyrf_context_switch_event__members[] = {
390 	sample_members
391 	member_def(perf_event_header, type, T_UINT, "event type"),
392 	member_def(perf_record_switch, next_prev_pid, T_UINT, "next/prev pid"),
393 	member_def(perf_record_switch, next_prev_tid, T_UINT, "next/prev tid"),
394 	{ .name = NULL, },
395 };
396 
397 static PyObject *pyrf_context_switch_event__repr(const struct pyrf_event *pevent)
398 {
399 	PyObject *ret;
400 	char *s;
401 
402 	if (asprintf(&s, "{ type: context_switch, next_prev_pid: %u, next_prev_tid: %u, switch_out: %u }",
403 		     pevent->event.context_switch.next_prev_pid,
404 		     pevent->event.context_switch.next_prev_tid,
405 		     !!(pevent->event.header.misc & PERF_RECORD_MISC_SWITCH_OUT)) < 0) {
406 		ret = PyErr_NoMemory();
407 	} else {
408 		ret = PyUnicode_FromString(s);
409 		free(s);
410 	}
411 	return ret;
412 }
413 
414 static PyTypeObject pyrf_context_switch_event__type = {
415 	PyVarObject_HEAD_INIT(NULL, 0)
416 	.tp_name	= "perf.context_switch_event",
417 	.tp_basicsize	= sizeof(struct pyrf_event),
418 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
419 	.tp_doc		= pyrf_context_switch_event__doc,
420 	.tp_members	= pyrf_context_switch_event__members,
421 	.tp_repr	= (reprfunc)pyrf_context_switch_event__repr,
422 };
423 
424 static int pyrf_event__setup_types(void)
425 {
426 	int err;
427 	pyrf_mmap_event__type.tp_new =
428 	pyrf_task_event__type.tp_new =
429 	pyrf_comm_event__type.tp_new =
430 	pyrf_lost_event__type.tp_new =
431 	pyrf_read_event__type.tp_new =
432 	pyrf_sample_event__type.tp_new =
433 	pyrf_context_switch_event__type.tp_new =
434 	pyrf_throttle_event__type.tp_new = PyType_GenericNew;
435 
436 	pyrf_sample_event__type.tp_dealloc = (destructor)pyrf_sample_event__delete;
437 
438 	err = PyType_Ready(&pyrf_mmap_event__type);
439 	if (err < 0)
440 		goto out;
441 	err = PyType_Ready(&pyrf_lost_event__type);
442 	if (err < 0)
443 		goto out;
444 	err = PyType_Ready(&pyrf_task_event__type);
445 	if (err < 0)
446 		goto out;
447 	err = PyType_Ready(&pyrf_comm_event__type);
448 	if (err < 0)
449 		goto out;
450 	err = PyType_Ready(&pyrf_throttle_event__type);
451 	if (err < 0)
452 		goto out;
453 	err = PyType_Ready(&pyrf_read_event__type);
454 	if (err < 0)
455 		goto out;
456 	err = PyType_Ready(&pyrf_sample_event__type);
457 	if (err < 0)
458 		goto out;
459 	err = PyType_Ready(&pyrf_context_switch_event__type);
460 	if (err < 0)
461 		goto out;
462 out:
463 	return err;
464 }
465 
466 static PyTypeObject *pyrf_event__type[] = {
467 	[PERF_RECORD_MMAP]	 = &pyrf_mmap_event__type,
468 	[PERF_RECORD_LOST]	 = &pyrf_lost_event__type,
469 	[PERF_RECORD_COMM]	 = &pyrf_comm_event__type,
470 	[PERF_RECORD_EXIT]	 = &pyrf_task_event__type,
471 	[PERF_RECORD_THROTTLE]	 = &pyrf_throttle_event__type,
472 	[PERF_RECORD_UNTHROTTLE] = &pyrf_throttle_event__type,
473 	[PERF_RECORD_FORK]	 = &pyrf_task_event__type,
474 	[PERF_RECORD_READ]	 = &pyrf_read_event__type,
475 	[PERF_RECORD_SAMPLE]	 = &pyrf_sample_event__type,
476 	[PERF_RECORD_SWITCH]	 = &pyrf_context_switch_event__type,
477 	[PERF_RECORD_SWITCH_CPU_WIDE]  = &pyrf_context_switch_event__type,
478 };
479 
480 static PyObject *pyrf_event__new(const union perf_event *event)
481 {
482 	struct pyrf_event *pevent;
483 	PyTypeObject *ptype;
484 
485 	if ((event->header.type < PERF_RECORD_MMAP ||
486 	     event->header.type > PERF_RECORD_SAMPLE) &&
487 	    !(event->header.type == PERF_RECORD_SWITCH ||
488 	      event->header.type == PERF_RECORD_SWITCH_CPU_WIDE))
489 		return NULL;
490 
491 	// FIXME this better be dynamic or we need to parse everything
492 	// before calling perf_mmap__consume(), including tracepoint fields.
493 	if (sizeof(pevent->event) < event->header.size)
494 		return NULL;
495 
496 	ptype = pyrf_event__type[event->header.type];
497 	pevent = PyObject_New(struct pyrf_event, ptype);
498 	if (pevent != NULL)
499 		memcpy(&pevent->event, event, event->header.size);
500 	return (PyObject *)pevent;
501 }
502 
503 struct pyrf_cpu_map {
504 	PyObject_HEAD
505 
506 	struct perf_cpu_map *cpus;
507 };
508 
509 static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus,
510 			      PyObject *args, PyObject *kwargs)
511 {
512 	static char *kwlist[] = { "cpustr", NULL };
513 	char *cpustr = NULL;
514 
515 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|s",
516 					 kwlist, &cpustr))
517 		return -1;
518 
519 	pcpus->cpus = perf_cpu_map__new(cpustr);
520 	if (pcpus->cpus == NULL)
521 		return -1;
522 	return 0;
523 }
524 
525 static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus)
526 {
527 	perf_cpu_map__put(pcpus->cpus);
528 	Py_TYPE(pcpus)->tp_free((PyObject*)pcpus);
529 }
530 
531 static Py_ssize_t pyrf_cpu_map__length(PyObject *obj)
532 {
533 	struct pyrf_cpu_map *pcpus = (void *)obj;
534 
535 	return perf_cpu_map__nr(pcpus->cpus);
536 }
537 
538 static PyObject *pyrf_cpu_map__item(PyObject *obj, Py_ssize_t i)
539 {
540 	struct pyrf_cpu_map *pcpus = (void *)obj;
541 
542 	if (i >= perf_cpu_map__nr(pcpus->cpus)) {
543 		PyErr_SetString(PyExc_IndexError, "Index out of range");
544 		return NULL;
545 	}
546 
547 	return Py_BuildValue("i", perf_cpu_map__cpu(pcpus->cpus, i).cpu);
548 }
549 
550 static PySequenceMethods pyrf_cpu_map__sequence_methods = {
551 	.sq_length = pyrf_cpu_map__length,
552 	.sq_item   = pyrf_cpu_map__item,
553 };
554 
555 static const char pyrf_cpu_map__doc[] = PyDoc_STR("cpu map object.");
556 
557 static PyTypeObject pyrf_cpu_map__type = {
558 	PyVarObject_HEAD_INIT(NULL, 0)
559 	.tp_name	= "perf.cpu_map",
560 	.tp_basicsize	= sizeof(struct pyrf_cpu_map),
561 	.tp_dealloc	= (destructor)pyrf_cpu_map__delete,
562 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
563 	.tp_doc		= pyrf_cpu_map__doc,
564 	.tp_as_sequence	= &pyrf_cpu_map__sequence_methods,
565 	.tp_init	= (initproc)pyrf_cpu_map__init,
566 };
567 
568 static int pyrf_cpu_map__setup_types(void)
569 {
570 	pyrf_cpu_map__type.tp_new = PyType_GenericNew;
571 	return PyType_Ready(&pyrf_cpu_map__type);
572 }
573 
574 struct pyrf_thread_map {
575 	PyObject_HEAD
576 
577 	struct perf_thread_map *threads;
578 };
579 
580 static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads,
581 				 PyObject *args, PyObject *kwargs)
582 {
583 	static char *kwlist[] = { "pid", "tid", NULL };
584 	int pid = -1, tid = -1;
585 
586 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii",
587 					 kwlist, &pid, &tid))
588 		return -1;
589 
590 	pthreads->threads = thread_map__new(pid, tid);
591 	if (pthreads->threads == NULL)
592 		return -1;
593 	return 0;
594 }
595 
596 static void pyrf_thread_map__delete(struct pyrf_thread_map *pthreads)
597 {
598 	perf_thread_map__put(pthreads->threads);
599 	Py_TYPE(pthreads)->tp_free((PyObject*)pthreads);
600 }
601 
602 static Py_ssize_t pyrf_thread_map__length(PyObject *obj)
603 {
604 	struct pyrf_thread_map *pthreads = (void *)obj;
605 
606 	return perf_thread_map__nr(pthreads->threads);
607 }
608 
609 static PyObject *pyrf_thread_map__item(PyObject *obj, Py_ssize_t i)
610 {
611 	struct pyrf_thread_map *pthreads = (void *)obj;
612 
613 	if (i >= perf_thread_map__nr(pthreads->threads)) {
614 		PyErr_SetString(PyExc_IndexError, "Index out of range");
615 		return NULL;
616 	}
617 
618 	return Py_BuildValue("i", perf_thread_map__pid(pthreads->threads, i));
619 }
620 
621 static PySequenceMethods pyrf_thread_map__sequence_methods = {
622 	.sq_length = pyrf_thread_map__length,
623 	.sq_item   = pyrf_thread_map__item,
624 };
625 
626 static const char pyrf_thread_map__doc[] = PyDoc_STR("thread map object.");
627 
628 static PyTypeObject pyrf_thread_map__type = {
629 	PyVarObject_HEAD_INIT(NULL, 0)
630 	.tp_name	= "perf.thread_map",
631 	.tp_basicsize	= sizeof(struct pyrf_thread_map),
632 	.tp_dealloc	= (destructor)pyrf_thread_map__delete,
633 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
634 	.tp_doc		= pyrf_thread_map__doc,
635 	.tp_as_sequence	= &pyrf_thread_map__sequence_methods,
636 	.tp_init	= (initproc)pyrf_thread_map__init,
637 };
638 
639 static int pyrf_thread_map__setup_types(void)
640 {
641 	pyrf_thread_map__type.tp_new = PyType_GenericNew;
642 	return PyType_Ready(&pyrf_thread_map__type);
643 }
644 
645 struct pyrf_counts_values {
646 	PyObject_HEAD
647 
648 	struct perf_counts_values values;
649 };
650 
651 static const char pyrf_counts_values__doc[] = PyDoc_STR("perf counts values object.");
652 
653 static void pyrf_counts_values__delete(struct pyrf_counts_values *pcounts_values)
654 {
655 	Py_TYPE(pcounts_values)->tp_free((PyObject *)pcounts_values);
656 }
657 
658 #define counts_values_member_def(member, ptype, help) \
659 	{ #member, ptype, \
660 	  offsetof(struct pyrf_counts_values, values.member), \
661 	  0, help }
662 
663 static PyMemberDef pyrf_counts_values_members[] = {
664 	counts_values_member_def(val, T_ULONG, "Value of event"),
665 	counts_values_member_def(ena, T_ULONG, "Time for which enabled"),
666 	counts_values_member_def(run, T_ULONG, "Time for which running"),
667 	counts_values_member_def(id, T_ULONG, "Unique ID for an event"),
668 	counts_values_member_def(lost, T_ULONG, "Num of lost samples"),
669 	{ .name = NULL, },
670 };
671 
672 static PyObject *pyrf_counts_values_get_values(struct pyrf_counts_values *self, void *closure)
673 {
674 	PyObject *vals = PyList_New(5);
675 
676 	if (!vals)
677 		return NULL;
678 	for (int i = 0; i < 5; i++)
679 		PyList_SetItem(vals, i, PyLong_FromLong(self->values.values[i]));
680 
681 	return vals;
682 }
683 
684 static int pyrf_counts_values_set_values(struct pyrf_counts_values *self, PyObject *list,
685 					 void *closure)
686 {
687 	Py_ssize_t size;
688 	PyObject *item = NULL;
689 
690 	if (!PyList_Check(list)) {
691 		PyErr_SetString(PyExc_TypeError, "Value assigned must be a list");
692 		return -1;
693 	}
694 
695 	size = PyList_Size(list);
696 	for (Py_ssize_t i = 0; i < size; i++) {
697 		item = PyList_GetItem(list, i);
698 		if (!PyLong_Check(item)) {
699 			PyErr_SetString(PyExc_TypeError, "List members should be numbers");
700 			return -1;
701 		}
702 		self->values.values[i] = PyLong_AsLong(item);
703 	}
704 
705 	return 0;
706 }
707 
708 static PyGetSetDef pyrf_counts_values_getset[] = {
709 	{"values", (getter)pyrf_counts_values_get_values, (setter)pyrf_counts_values_set_values,
710 		"Name field", NULL},
711 	{ .name = NULL, },
712 };
713 
714 static PyTypeObject pyrf_counts_values__type = {
715 	PyVarObject_HEAD_INIT(NULL, 0)
716 	.tp_name	= "perf.counts_values",
717 	.tp_basicsize	= sizeof(struct pyrf_counts_values),
718 	.tp_dealloc	= (destructor)pyrf_counts_values__delete,
719 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
720 	.tp_doc		= pyrf_counts_values__doc,
721 	.tp_members	= pyrf_counts_values_members,
722 	.tp_getset	= pyrf_counts_values_getset,
723 };
724 
725 static int pyrf_counts_values__setup_types(void)
726 {
727 	pyrf_counts_values__type.tp_new = PyType_GenericNew;
728 	return PyType_Ready(&pyrf_counts_values__type);
729 }
730 
731 struct pyrf_evsel {
732 	PyObject_HEAD
733 
734 	struct evsel evsel;
735 };
736 
737 static int pyrf_evsel__init(struct pyrf_evsel *pevsel,
738 			    PyObject *args, PyObject *kwargs)
739 {
740 	struct perf_event_attr attr = {
741 		.type = PERF_TYPE_HARDWARE,
742 		.config = PERF_COUNT_HW_CPU_CYCLES,
743 		.sample_type = PERF_SAMPLE_PERIOD | PERF_SAMPLE_TID,
744 	};
745 	static char *kwlist[] = {
746 		"type",
747 		"config",
748 		"sample_freq",
749 		"sample_period",
750 		"sample_type",
751 		"read_format",
752 		"disabled",
753 		"inherit",
754 		"pinned",
755 		"exclusive",
756 		"exclude_user",
757 		"exclude_kernel",
758 		"exclude_hv",
759 		"exclude_idle",
760 		"mmap",
761 		"context_switch",
762 		"comm",
763 		"freq",
764 		"inherit_stat",
765 		"enable_on_exec",
766 		"task",
767 		"watermark",
768 		"precise_ip",
769 		"mmap_data",
770 		"sample_id_all",
771 		"wakeup_events",
772 		"bp_type",
773 		"bp_addr",
774 		"bp_len",
775 		 NULL
776 	};
777 	u64 sample_period = 0;
778 	u32 disabled = 0,
779 	    inherit = 0,
780 	    pinned = 0,
781 	    exclusive = 0,
782 	    exclude_user = 0,
783 	    exclude_kernel = 0,
784 	    exclude_hv = 0,
785 	    exclude_idle = 0,
786 	    mmap = 0,
787 	    context_switch = 0,
788 	    comm = 0,
789 	    freq = 1,
790 	    inherit_stat = 0,
791 	    enable_on_exec = 0,
792 	    task = 0,
793 	    watermark = 0,
794 	    precise_ip = 0,
795 	    mmap_data = 0,
796 	    sample_id_all = 1;
797 	int idx = 0;
798 
799 	if (!PyArg_ParseTupleAndKeywords(args, kwargs,
800 					 "|iKiKKiiiiiiiiiiiiiiiiiiiiiiKK", kwlist,
801 					 &attr.type, &attr.config, &attr.sample_freq,
802 					 &sample_period, &attr.sample_type,
803 					 &attr.read_format, &disabled, &inherit,
804 					 &pinned, &exclusive, &exclude_user,
805 					 &exclude_kernel, &exclude_hv, &exclude_idle,
806 					 &mmap, &context_switch, &comm, &freq, &inherit_stat,
807 					 &enable_on_exec, &task, &watermark,
808 					 &precise_ip, &mmap_data, &sample_id_all,
809 					 &attr.wakeup_events, &attr.bp_type,
810 					 &attr.bp_addr, &attr.bp_len, &idx))
811 		return -1;
812 
813 	/* union... */
814 	if (sample_period != 0) {
815 		if (attr.sample_freq != 0)
816 			return -1; /* FIXME: throw right exception */
817 		attr.sample_period = sample_period;
818 	}
819 
820 	/* Bitfields */
821 	attr.disabled	    = disabled;
822 	attr.inherit	    = inherit;
823 	attr.pinned	    = pinned;
824 	attr.exclusive	    = exclusive;
825 	attr.exclude_user   = exclude_user;
826 	attr.exclude_kernel = exclude_kernel;
827 	attr.exclude_hv	    = exclude_hv;
828 	attr.exclude_idle   = exclude_idle;
829 	attr.mmap	    = mmap;
830 	attr.context_switch = context_switch;
831 	attr.comm	    = comm;
832 	attr.freq	    = freq;
833 	attr.inherit_stat   = inherit_stat;
834 	attr.enable_on_exec = enable_on_exec;
835 	attr.task	    = task;
836 	attr.watermark	    = watermark;
837 	attr.precise_ip	    = precise_ip;
838 	attr.mmap_data	    = mmap_data;
839 	attr.sample_id_all  = sample_id_all;
840 	attr.size	    = sizeof(attr);
841 
842 	evsel__init(&pevsel->evsel, &attr, idx);
843 	return 0;
844 }
845 
846 static void pyrf_evsel__delete(struct pyrf_evsel *pevsel)
847 {
848 	evsel__exit(&pevsel->evsel);
849 	Py_TYPE(pevsel)->tp_free((PyObject*)pevsel);
850 }
851 
852 static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
853 				  PyObject *args, PyObject *kwargs)
854 {
855 	struct evsel *evsel = &pevsel->evsel;
856 	struct perf_cpu_map *cpus = NULL;
857 	struct perf_thread_map *threads = NULL;
858 	PyObject *pcpus = NULL, *pthreads = NULL;
859 	int group = 0, inherit = 0;
860 	static char *kwlist[] = { "cpus", "threads", "group", "inherit", NULL };
861 
862 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist,
863 					 &pcpus, &pthreads, &group, &inherit))
864 		return NULL;
865 
866 	if (pthreads != NULL)
867 		threads = ((struct pyrf_thread_map *)pthreads)->threads;
868 
869 	if (pcpus != NULL)
870 		cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
871 
872 	evsel->core.attr.inherit = inherit;
873 	/*
874 	 * This will group just the fds for this single evsel, to group
875 	 * multiple events, use evlist.open().
876 	 */
877 	if (evsel__open(evsel, cpus, threads) < 0) {
878 		PyErr_SetFromErrno(PyExc_OSError);
879 		return NULL;
880 	}
881 
882 	Py_INCREF(Py_None);
883 	return Py_None;
884 }
885 
886 static PyObject *pyrf_evsel__cpus(struct pyrf_evsel *pevsel)
887 {
888 	struct pyrf_cpu_map *pcpu_map = PyObject_New(struct pyrf_cpu_map, &pyrf_cpu_map__type);
889 
890 	if (pcpu_map)
891 		pcpu_map->cpus = perf_cpu_map__get(pevsel->evsel.core.cpus);
892 
893 	return (PyObject *)pcpu_map;
894 }
895 
896 static PyObject *pyrf_evsel__threads(struct pyrf_evsel *pevsel)
897 {
898 	struct pyrf_thread_map *pthread_map =
899 		PyObject_New(struct pyrf_thread_map, &pyrf_thread_map__type);
900 
901 	if (pthread_map)
902 		pthread_map->threads = perf_thread_map__get(pevsel->evsel.core.threads);
903 
904 	return (PyObject *)pthread_map;
905 }
906 
907 /*
908  * Ensure evsel's counts and prev_raw_counts are allocated, the latter
909  * used by tool PMUs to compute the cumulative count as expected by
910  * stat's process_counter_values.
911  */
912 static int evsel__ensure_counts(struct evsel *evsel)
913 {
914 	int nthreads, ncpus;
915 
916 	if (evsel->counts != NULL)
917 		return 0;
918 
919 	nthreads = perf_thread_map__nr(evsel->core.threads);
920 	ncpus = perf_cpu_map__nr(evsel->core.cpus);
921 
922 	evsel->counts = perf_counts__new(ncpus, nthreads);
923 	if (evsel->counts == NULL)
924 		return -ENOMEM;
925 
926 	evsel->prev_raw_counts = perf_counts__new(ncpus, nthreads);
927 	if (evsel->prev_raw_counts == NULL)
928 		return -ENOMEM;
929 
930 	return 0;
931 }
932 
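/*
 * Editor's note: a minimal sketch of how read() below is typically driven
 * from Python, assuming an evsel that has already been opened on the wanted
 * CPUs and threads (the variable names are illustrative, not from this file):
 *
 *	counts = evsel.read(cpu, thread)      # returns a perf.counts_values
 *	print(counts.val, counts.ena, counts.run)
 *
 * Each call returns the delta since the previous read() for that
 * (cpu, thread) pair, because prev_raw_counts is updated below.
 */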
933 static PyObject *pyrf_evsel__read(struct pyrf_evsel *pevsel,
934 				  PyObject *args, PyObject *kwargs)
935 {
936 	struct evsel *evsel = &pevsel->evsel;
937 	int cpu = 0, cpu_idx, thread = 0, thread_idx;
938 	struct perf_counts_values *old_count, *new_count;
939 	struct pyrf_counts_values *count_values = PyObject_New(struct pyrf_counts_values,
940 							       &pyrf_counts_values__type);
941 
942 	if (!count_values)
943 		return NULL;
944 
945 	if (!PyArg_ParseTuple(args, "ii", &cpu, &thread))
946 		return NULL;
947 
948 	cpu_idx = perf_cpu_map__idx(evsel->core.cpus, (struct perf_cpu){.cpu = cpu});
949 	if (cpu_idx < 0) {
950 		PyErr_Format(PyExc_TypeError, "CPU %d is not part of evsel's CPUs", cpu);
951 		return NULL;
952 	}
953 	thread_idx = perf_thread_map__idx(evsel->core.threads, thread);
954 	if (thread_idx < 0) {
955 		PyErr_Format(PyExc_TypeError, "Thread %d is not part of evsel's threads",
956 			     thread);
957 		return NULL;
958 	}
959 
960 	if (evsel__ensure_counts(evsel))
961 		return PyErr_NoMemory();
962 
963 	/* Set up pointers to the old and newly read counter values. */
964 	old_count = perf_counts(evsel->prev_raw_counts, cpu_idx, thread_idx);
965 	new_count = perf_counts(evsel->counts, cpu_idx, thread_idx);
966 	/* Update the value in evsel->counts. */
967 	evsel__read_counter(evsel, cpu_idx, thread_idx);
968 	/* Copy the value and turn it into the delta from old_count. */
969 	count_values->values = *new_count;
970 	count_values->values.val -= old_count->val;
971 	count_values->values.ena -= old_count->ena;
972 	count_values->values.run -= old_count->run;
973 	/* Save the new count over the old_count for the next read. */
974 	*old_count = *new_count;
975 	return (PyObject *)count_values;
976 }
977 
978 static PyObject *pyrf_evsel__str(PyObject *self)
979 {
980 	struct pyrf_evsel *pevsel = (void *)self;
981 	struct evsel *evsel = &pevsel->evsel;
982 
983 	return PyUnicode_FromFormat("evsel(%s/%s/)", evsel__pmu_name(evsel), evsel__name(evsel));
984 }
985 
986 static PyMethodDef pyrf_evsel__methods[] = {
987 	{
988 		.ml_name  = "open",
989 		.ml_meth  = (PyCFunction)pyrf_evsel__open,
990 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
991 		.ml_doc	  = PyDoc_STR("open the event selector file descriptor table.")
992 	},
993 	{
994 		.ml_name  = "cpus",
995 		.ml_meth  = (PyCFunction)pyrf_evsel__cpus,
996 		.ml_flags = METH_NOARGS,
997 		.ml_doc	  = PyDoc_STR("CPUs the event is to be used with.")
998 	},
999 	{
1000 		.ml_name  = "threads",
1001 		.ml_meth  = (PyCFunction)pyrf_evsel__threads,
1002 		.ml_flags = METH_NOARGS,
1003 		.ml_doc	  = PyDoc_STR("threads the event is to be used with.")
1004 	},
1005 	{
1006 		.ml_name  = "read",
1007 		.ml_meth  = (PyCFunction)pyrf_evsel__read,
1008 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
1009 		.ml_doc	  = PyDoc_STR("read counters")
1010 	},
1011 	{ .ml_name = NULL, }
1012 };
1013 
1014 #define evsel_member_def(member, ptype, help) \
1015 	{ #member, ptype, \
1016 	  offsetof(struct pyrf_evsel, evsel.member), \
1017 	  0, help }
1018 
1019 #define evsel_attr_member_def(member, ptype, help) \
1020 	{ #member, ptype, \
1021 	  offsetof(struct pyrf_evsel, evsel.core.attr.member), \
1022 	  0, help }
1023 
1024 static PyMemberDef pyrf_evsel__members[] = {
1025 	evsel_member_def(tracking, T_BOOL, "tracking event."),
1026 	evsel_attr_member_def(type, T_UINT, "attribute type."),
1027 	evsel_attr_member_def(size, T_UINT, "attribute size."),
1028 	evsel_attr_member_def(config, T_ULONGLONG, "attribute config."),
1029 	evsel_attr_member_def(sample_period, T_ULONGLONG, "attribute sample_period."),
1030 	evsel_attr_member_def(sample_type, T_ULONGLONG, "attribute sample_type."),
1031 	evsel_attr_member_def(read_format, T_ULONGLONG, "attribute read_format."),
1032 	evsel_attr_member_def(wakeup_events, T_UINT, "attribute wakeup_events."),
1033 	{ .name = NULL, },
1034 };
1035 
1036 static const char pyrf_evsel__doc[] = PyDoc_STR("perf event selector list object.");
1037 
1038 static PyTypeObject pyrf_evsel__type = {
1039 	PyVarObject_HEAD_INIT(NULL, 0)
1040 	.tp_name	= "perf.evsel",
1041 	.tp_basicsize	= sizeof(struct pyrf_evsel),
1042 	.tp_dealloc	= (destructor)pyrf_evsel__delete,
1043 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
1044 	.tp_doc		= pyrf_evsel__doc,
1045 	.tp_members	= pyrf_evsel__members,
1046 	.tp_methods	= pyrf_evsel__methods,
1047 	.tp_init	= (initproc)pyrf_evsel__init,
1048 	.tp_str         = pyrf_evsel__str,
1049 	.tp_repr        = pyrf_evsel__str,
1050 };
1051 
1052 static int pyrf_evsel__setup_types(void)
1053 {
1054 	pyrf_evsel__type.tp_new = PyType_GenericNew;
1055 	return PyType_Ready(&pyrf_evsel__type);
1056 }
1057 
1058 struct pyrf_evlist {
1059 	PyObject_HEAD
1060 
1061 	struct evlist evlist;
1062 };
1063 
1064 static int pyrf_evlist__init(struct pyrf_evlist *pevlist,
1065 			     PyObject *args, PyObject *kwargs __maybe_unused)
1066 {
1067 	PyObject *pcpus = NULL, *pthreads = NULL;
1068 	struct perf_cpu_map *cpus;
1069 	struct perf_thread_map *threads;
1070 
1071 	if (!PyArg_ParseTuple(args, "OO", &pcpus, &pthreads))
1072 		return -1;
1073 
1074 	threads = ((struct pyrf_thread_map *)pthreads)->threads;
1075 	cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
1076 	evlist__init(&pevlist->evlist, cpus, threads);
1077 	return 0;
1078 }
1079 
1080 static void pyrf_evlist__delete(struct pyrf_evlist *pevlist)
1081 {
1082 	evlist__exit(&pevlist->evlist);
1083 	Py_TYPE(pevlist)->tp_free((PyObject*)pevlist);
1084 }
1085 
1086 static PyObject *pyrf_evlist__all_cpus(struct pyrf_evlist *pevlist)
1087 {
1088 	struct pyrf_cpu_map *pcpu_map = PyObject_New(struct pyrf_cpu_map, &pyrf_cpu_map__type);
1089 
1090 	if (pcpu_map)
1091 		pcpu_map->cpus = perf_cpu_map__get(pevlist->evlist.core.all_cpus);
1092 
1093 	return (PyObject *)pcpu_map;
1094 }
1095 
1096 static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist,
1097 				   PyObject *args, PyObject *kwargs)
1098 {
1099 	struct evlist *evlist = &pevlist->evlist;
1100 	static char *kwlist[] = { "pages", "overwrite", NULL };
1101 	int pages = 128, overwrite = false;
1102 
1103 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", kwlist,
1104 					 &pages, &overwrite))
1105 		return NULL;
1106 
1107 	if (evlist__mmap(evlist, pages) < 0) {
1108 		PyErr_SetFromErrno(PyExc_OSError);
1109 		return NULL;
1110 	}
1111 
1112 	Py_INCREF(Py_None);
1113 	return Py_None;
1114 }
1115 
1116 static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist,
1117 				   PyObject *args, PyObject *kwargs)
1118 {
1119 	struct evlist *evlist = &pevlist->evlist;
1120 	static char *kwlist[] = { "timeout", NULL };
1121 	int timeout = -1, n;
1122 
1123 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout))
1124 		return NULL;
1125 
1126 	n = evlist__poll(evlist, timeout);
1127 	if (n < 0) {
1128 		PyErr_SetFromErrno(PyExc_OSError);
1129 		return NULL;
1130 	}
1131 
1132 	return Py_BuildValue("i", n);
1133 }
1134 
1135 static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
1136 					 PyObject *args __maybe_unused,
1137 					 PyObject *kwargs __maybe_unused)
1138 {
1139 	struct evlist *evlist = &pevlist->evlist;
1140 	PyObject *list = PyList_New(0);
1141 	int i;
1142 
1143 	for (i = 0; i < evlist->core.pollfd.nr; ++i) {
1144 		PyObject *file;
1145 		file = PyFile_FromFd(evlist->core.pollfd.entries[i].fd, "perf", "r", -1,
1146 				     NULL, NULL, NULL, 0);
1147 		if (file == NULL)
1148 			goto free_list;
1149 
1150 		if (PyList_Append(list, file) != 0) {
1151 			Py_DECREF(file);
1152 			goto free_list;
1153 		}
1154 
1155 		Py_DECREF(file);
1156 	}
1157 
1158 	return list;
1159 free_list:
1160 	return PyErr_NoMemory();
1161 }
1162 
1163 
1164 static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist,
1165 				  PyObject *args,
1166 				  PyObject *kwargs __maybe_unused)
1167 {
1168 	struct evlist *evlist = &pevlist->evlist;
1169 	PyObject *pevsel;
1170 	struct evsel *evsel;
1171 
1172 	if (!PyArg_ParseTuple(args, "O", &pevsel))
1173 		return NULL;
1174 
1175 	Py_INCREF(pevsel);
1176 	evsel = &((struct pyrf_evsel *)pevsel)->evsel;
1177 	evsel->core.idx = evlist->core.nr_entries;
1178 	evlist__add(evlist, evsel);
1179 
1180 	return Py_BuildValue("i", evlist->core.nr_entries);
1181 }
1182 
1183 static struct mmap *get_md(struct evlist *evlist, int cpu)
1184 {
1185 	int i;
1186 
1187 	for (i = 0; i < evlist->core.nr_mmaps; i++) {
1188 		struct mmap *md = &evlist->mmap[i];
1189 
1190 		if (md->core.cpu.cpu == cpu)
1191 			return md;
1192 	}
1193 
1194 	return NULL;
1195 }
1196 
1197 static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
1198 					  PyObject *args, PyObject *kwargs)
1199 {
1200 	struct evlist *evlist = &pevlist->evlist;
1201 	union perf_event *event;
1202 	int sample_id_all = 1, cpu;
1203 	static char *kwlist[] = { "cpu", "sample_id_all", NULL };
1204 	struct mmap *md;
1205 	int err;
1206 
1207 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist,
1208 					 &cpu, &sample_id_all))
1209 		return NULL;
1210 
1211 	md = get_md(evlist, cpu);
1212 	if (!md)
1213 		return NULL;
1214 
1215 	if (perf_mmap__read_init(&md->core) < 0)
1216 		goto end;
1217 
1218 	event = perf_mmap__read_event(&md->core);
1219 	if (event != NULL) {
1220 		PyObject *pyevent = pyrf_event__new(event);
1221 		struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
1222 		struct evsel *evsel;
1223 
1224 		if (pyevent == NULL)
1225 			return PyErr_NoMemory();
1226 
1227 		evsel = evlist__event2evsel(evlist, event);
1228 		if (!evsel) {
1229 			Py_DECREF(pyevent);
1230 			Py_INCREF(Py_None);
1231 			return Py_None;
1232 		}
1233 
1234 		pevent->evsel = evsel;
1235 
1236 		perf_mmap__consume(&md->core);
1237 
1238 		err = evsel__parse_sample(evsel, &pevent->event, &pevent->sample);
1239 		if (err) {
1240 			Py_DECREF(pyevent);
1241 			return PyErr_Format(PyExc_OSError,
1242 					    "perf: can't parse sample, err=%d", err);
1243 		}
1244 
1245 		return pyevent;
1246 	}
1247 end:
1248 	Py_INCREF(Py_None);
1249 	return Py_None;
1250 }
1251 
1252 static PyObject *pyrf_evlist__open(struct pyrf_evlist *pevlist,
1253 				   PyObject *args, PyObject *kwargs)
1254 {
1255 	struct evlist *evlist = &pevlist->evlist;
1256 
1257 	if (evlist__open(evlist) < 0) {
1258 		PyErr_SetFromErrno(PyExc_OSError);
1259 		return NULL;
1260 	}
1261 
1262 	Py_INCREF(Py_None);
1263 	return Py_None;
1264 }
1265 
1266 static PyObject *pyrf_evlist__close(struct pyrf_evlist *pevlist)
1267 {
1268 	struct evlist *evlist = &pevlist->evlist;
1269 
1270 	evlist__close(evlist);
1271 
1272 	Py_INCREF(Py_None);
1273 	return Py_None;
1274 }
1275 
1276 static PyObject *pyrf_evlist__config(struct pyrf_evlist *pevlist)
1277 {
1278 	struct record_opts opts = {
1279 		.sample_time	     = true,
1280 		.mmap_pages	     = UINT_MAX,
1281 		.user_freq	     = UINT_MAX,
1282 		.user_interval	     = ULLONG_MAX,
1283 		.freq		     = 4000,
1284 		.target		     = {
1285 			.uses_mmap   = true,
1286 			.default_per_cpu = true,
1287 		},
1288 		.nr_threads_synthesize = 1,
1289 		.ctl_fd              = -1,
1290 		.ctl_fd_ack          = -1,
1291 		.no_buffering        = true,
1292 		.no_inherit          = true,
1293 	};
1294 	struct evlist *evlist = &pevlist->evlist;
1295 
1296 	evlist__config(evlist, &opts, &callchain_param);
1297 	Py_INCREF(Py_None);
1298 	return Py_None;
1299 }
1300 
1301 static PyObject *pyrf_evlist__disable(struct pyrf_evlist *pevlist)
1302 {
1303 	evlist__disable(&pevlist->evlist);
1304 	Py_INCREF(Py_None);
1305 	return Py_None;
1306 }
1307 
1308 static PyObject *pyrf_evlist__enable(struct pyrf_evlist *pevlist)
1309 {
1310 	evlist__enable(&pevlist->evlist);
1311 	Py_INCREF(Py_None);
1312 	return Py_None;
1313 }
1314 
1315 static PyMethodDef pyrf_evlist__methods[] = {
1316 	{
1317 		.ml_name  = "all_cpus",
1318 		.ml_meth  = (PyCFunction)pyrf_evlist__all_cpus,
1319 		.ml_flags = METH_NOARGS,
1320 		.ml_doc	  = PyDoc_STR("CPU map union of all evsel CPU maps.")
1321 	},
1322 	{
1323 		.ml_name  = "mmap",
1324 		.ml_meth  = (PyCFunction)pyrf_evlist__mmap,
1325 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
1326 		.ml_doc	  = PyDoc_STR("mmap the file descriptor table.")
1327 	},
1328 	{
1329 		.ml_name  = "open",
1330 		.ml_meth  = (PyCFunction)pyrf_evlist__open,
1331 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
1332 		.ml_doc	  = PyDoc_STR("open the file descriptors.")
1333 	},
1334 	{
1335 		.ml_name  = "close",
1336 		.ml_meth  = (PyCFunction)pyrf_evlist__close,
1337 		.ml_flags = METH_NOARGS,
1338 		.ml_doc	  = PyDoc_STR("close the file descriptors.")
1339 	},
1340 	{
1341 		.ml_name  = "poll",
1342 		.ml_meth  = (PyCFunction)pyrf_evlist__poll,
1343 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
1344 		.ml_doc	  = PyDoc_STR("poll the file descriptor table.")
1345 	},
1346 	{
1347 		.ml_name  = "get_pollfd",
1348 		.ml_meth  = (PyCFunction)pyrf_evlist__get_pollfd,
1349 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
1350 		.ml_doc	  = PyDoc_STR("get the poll file descriptor table.")
1351 	},
1352 	{
1353 		.ml_name  = "add",
1354 		.ml_meth  = (PyCFunction)pyrf_evlist__add,
1355 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
1356 		.ml_doc	  = PyDoc_STR("adds an event selector to the list.")
1357 	},
1358 	{
1359 		.ml_name  = "read_on_cpu",
1360 		.ml_meth  = (PyCFunction)pyrf_evlist__read_on_cpu,
1361 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
1362 		.ml_doc	  = PyDoc_STR("reads an event.")
1363 	},
1364 	{
1365 		.ml_name  = "config",
1366 		.ml_meth  = (PyCFunction)pyrf_evlist__config,
1367 		.ml_flags = METH_NOARGS,
1368 		.ml_doc	  = PyDoc_STR("Apply default record options to the evlist.")
1369 	},
1370 	{
1371 		.ml_name  = "disable",
1372 		.ml_meth  = (PyCFunction)pyrf_evlist__disable,
1373 		.ml_flags = METH_NOARGS,
1374 		.ml_doc	  = PyDoc_STR("Disable the evsels in the evlist.")
1375 	},
1376 	{
1377 		.ml_name  = "enable",
1378 		.ml_meth  = (PyCFunction)pyrf_evlist__enable,
1379 		.ml_flags = METH_NOARGS,
1380 		.ml_doc	  = PyDoc_STR("Enable the evsels in the evlist.")
1381 	},
1382 	{ .ml_name = NULL, }
1383 };
1384 
1385 static Py_ssize_t pyrf_evlist__length(PyObject *obj)
1386 {
1387 	struct pyrf_evlist *pevlist = (void *)obj;
1388 
1389 	return pevlist->evlist.core.nr_entries;
1390 }
1391 
1392 static PyObject *pyrf_evlist__item(PyObject *obj, Py_ssize_t i)
1393 {
1394 	struct pyrf_evlist *pevlist = (void *)obj;
1395 	struct evsel *pos;
1396 
1397 	if (i >= pevlist->evlist.core.nr_entries) {
1398 		PyErr_SetString(PyExc_IndexError, "Index out of range");
1399 		return NULL;
1400 	}
1401 
1402 	evlist__for_each_entry(&pevlist->evlist, pos) {
1403 		if (i-- == 0)
1404 			break;
1405 	}
1406 
1407 	return Py_BuildValue("O", container_of(pos, struct pyrf_evsel, evsel));
1408 }
1409 
1410 static PyObject *pyrf_evlist__str(PyObject *self)
1411 {
1412 	struct pyrf_evlist *pevlist = (void *)self;
1413 	struct evsel *pos;
1414 	struct strbuf sb = STRBUF_INIT;
1415 	bool first = true;
1416 	PyObject *result;
1417 
1418 	strbuf_addstr(&sb, "evlist([");
1419 	evlist__for_each_entry(&pevlist->evlist, pos) {
1420 		if (!first)
1421 			strbuf_addch(&sb, ',');
1422 		if (!pos->pmu)
1423 			strbuf_addstr(&sb, evsel__name(pos));
1424 		else
1425 			strbuf_addf(&sb, "%s/%s/", pos->pmu->name, evsel__name(pos));
1426 		first = false;
1427 	}
1428 	strbuf_addstr(&sb, "])");
1429 	result = PyUnicode_FromString(sb.buf);
1430 	strbuf_release(&sb);
1431 	return result;
1432 }
1433 
1434 static PySequenceMethods pyrf_evlist__sequence_methods = {
1435 	.sq_length = pyrf_evlist__length,
1436 	.sq_item   = pyrf_evlist__item,
1437 };
1438 
1439 static const char pyrf_evlist__doc[] = PyDoc_STR("perf event selector list object.");
1440 
1441 static PyTypeObject pyrf_evlist__type = {
1442 	PyVarObject_HEAD_INIT(NULL, 0)
1443 	.tp_name	= "perf.evlist",
1444 	.tp_basicsize	= sizeof(struct pyrf_evlist),
1445 	.tp_dealloc	= (destructor)pyrf_evlist__delete,
1446 	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
1447 	.tp_as_sequence	= &pyrf_evlist__sequence_methods,
1448 	.tp_doc		= pyrf_evlist__doc,
1449 	.tp_methods	= pyrf_evlist__methods,
1450 	.tp_init	= (initproc)pyrf_evlist__init,
1451 	.tp_repr        = pyrf_evlist__str,
1452 	.tp_str         = pyrf_evlist__str,
1453 };
1454 
1455 static int pyrf_evlist__setup_types(void)
1456 {
1457 	pyrf_evlist__type.tp_new = PyType_GenericNew;
1458 	return PyType_Ready(&pyrf_evlist__type);
1459 }
1460 
1461 #define PERF_CONST(name) { #name, PERF_##name }
1462 
1463 struct perf_constant {
1464 	const char *name;
1465 	int	    value;
1466 };
1467 
1468 static const struct perf_constant perf__constants[] = {
1469 	PERF_CONST(TYPE_HARDWARE),
1470 	PERF_CONST(TYPE_SOFTWARE),
1471 	PERF_CONST(TYPE_TRACEPOINT),
1472 	PERF_CONST(TYPE_HW_CACHE),
1473 	PERF_CONST(TYPE_RAW),
1474 	PERF_CONST(TYPE_BREAKPOINT),
1475 
1476 	PERF_CONST(COUNT_HW_CPU_CYCLES),
1477 	PERF_CONST(COUNT_HW_INSTRUCTIONS),
1478 	PERF_CONST(COUNT_HW_CACHE_REFERENCES),
1479 	PERF_CONST(COUNT_HW_CACHE_MISSES),
1480 	PERF_CONST(COUNT_HW_BRANCH_INSTRUCTIONS),
1481 	PERF_CONST(COUNT_HW_BRANCH_MISSES),
1482 	PERF_CONST(COUNT_HW_BUS_CYCLES),
1483 	PERF_CONST(COUNT_HW_CACHE_L1D),
1484 	PERF_CONST(COUNT_HW_CACHE_L1I),
1485 	PERF_CONST(COUNT_HW_CACHE_LL),
1486 	PERF_CONST(COUNT_HW_CACHE_DTLB),
1487 	PERF_CONST(COUNT_HW_CACHE_ITLB),
1488 	PERF_CONST(COUNT_HW_CACHE_BPU),
1489 	PERF_CONST(COUNT_HW_CACHE_OP_READ),
1490 	PERF_CONST(COUNT_HW_CACHE_OP_WRITE),
1491 	PERF_CONST(COUNT_HW_CACHE_OP_PREFETCH),
1492 	PERF_CONST(COUNT_HW_CACHE_RESULT_ACCESS),
1493 	PERF_CONST(COUNT_HW_CACHE_RESULT_MISS),
1494 
1495 	PERF_CONST(COUNT_HW_STALLED_CYCLES_FRONTEND),
1496 	PERF_CONST(COUNT_HW_STALLED_CYCLES_BACKEND),
1497 
1498 	PERF_CONST(COUNT_SW_CPU_CLOCK),
1499 	PERF_CONST(COUNT_SW_TASK_CLOCK),
1500 	PERF_CONST(COUNT_SW_PAGE_FAULTS),
1501 	PERF_CONST(COUNT_SW_CONTEXT_SWITCHES),
1502 	PERF_CONST(COUNT_SW_CPU_MIGRATIONS),
1503 	PERF_CONST(COUNT_SW_PAGE_FAULTS_MIN),
1504 	PERF_CONST(COUNT_SW_PAGE_FAULTS_MAJ),
1505 	PERF_CONST(COUNT_SW_ALIGNMENT_FAULTS),
1506 	PERF_CONST(COUNT_SW_EMULATION_FAULTS),
1507 	PERF_CONST(COUNT_SW_DUMMY),
1508 
1509 	PERF_CONST(SAMPLE_IP),
1510 	PERF_CONST(SAMPLE_TID),
1511 	PERF_CONST(SAMPLE_TIME),
1512 	PERF_CONST(SAMPLE_ADDR),
1513 	PERF_CONST(SAMPLE_READ),
1514 	PERF_CONST(SAMPLE_CALLCHAIN),
1515 	PERF_CONST(SAMPLE_ID),
1516 	PERF_CONST(SAMPLE_CPU),
1517 	PERF_CONST(SAMPLE_PERIOD),
1518 	PERF_CONST(SAMPLE_STREAM_ID),
1519 	PERF_CONST(SAMPLE_RAW),
1520 
1521 	PERF_CONST(FORMAT_TOTAL_TIME_ENABLED),
1522 	PERF_CONST(FORMAT_TOTAL_TIME_RUNNING),
1523 	PERF_CONST(FORMAT_ID),
1524 	PERF_CONST(FORMAT_GROUP),
1525 
1526 	PERF_CONST(RECORD_MMAP),
1527 	PERF_CONST(RECORD_LOST),
1528 	PERF_CONST(RECORD_COMM),
1529 	PERF_CONST(RECORD_EXIT),
1530 	PERF_CONST(RECORD_THROTTLE),
1531 	PERF_CONST(RECORD_UNTHROTTLE),
1532 	PERF_CONST(RECORD_FORK),
1533 	PERF_CONST(RECORD_READ),
1534 	PERF_CONST(RECORD_SAMPLE),
1535 	PERF_CONST(RECORD_MMAP2),
1536 	PERF_CONST(RECORD_AUX),
1537 	PERF_CONST(RECORD_ITRACE_START),
1538 	PERF_CONST(RECORD_LOST_SAMPLES),
1539 	PERF_CONST(RECORD_SWITCH),
1540 	PERF_CONST(RECORD_SWITCH_CPU_WIDE),
1541 
1542 	PERF_CONST(RECORD_MISC_SWITCH_OUT),
1543 	{ .name = NULL, },
1544 };
1545 
1546 static PyObject *pyrf__tracepoint(struct pyrf_evsel *pevsel,
1547 				  PyObject *args, PyObject *kwargs)
1548 {
1549 #ifndef HAVE_LIBTRACEEVENT
1550 	return NULL;
1551 #else
1552 	struct tep_event *tp_format;
1553 	static char *kwlist[] = { "sys", "name", NULL };
1554 	char *sys  = NULL;
1555 	char *name = NULL;
1556 
1557 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ss", kwlist,
1558 					 &sys, &name))
1559 		return NULL;
1560 
1561 	tp_format = trace_event__tp_format(sys, name);
1562 	if (IS_ERR(tp_format))
1563 		return PyLong_FromLong(-1);
1564 
1565 	return PyLong_FromLong(tp_format->id);
1566 #endif // HAVE_LIBTRACEEVENT
1567 }
1568 
1569 static PyObject *pyrf_evsel__from_evsel(struct evsel *evsel)
1570 {
1571 	struct pyrf_evsel *pevsel = PyObject_New(struct pyrf_evsel, &pyrf_evsel__type);
1572 
1573 	if (!pevsel)
1574 		return NULL;
1575 
1576 	memset(&pevsel->evsel, 0, sizeof(pevsel->evsel));
1577 	evsel__init(&pevsel->evsel, &evsel->core.attr, evsel->core.idx);
1578 
1579 	evsel__clone(&pevsel->evsel, evsel);
1580 	if (evsel__is_group_leader(evsel))
1581 		evsel__set_leader(&pevsel->evsel, &pevsel->evsel);
1582 	return (PyObject *)pevsel;
1583 }
1584 
1585 static int evlist__pos(struct evlist *evlist, struct evsel *evsel)
1586 {
1587 	struct evsel *pos;
1588 	int idx = 0;
1589 
1590 	evlist__for_each_entry(evlist, pos) {
1591 		if (evsel == pos)
1592 			return idx;
1593 		idx++;
1594 	}
1595 	return -1;
1596 }
1597 
1598 static struct evsel *evlist__at(struct evlist *evlist, int idx)
1599 {
1600 	struct evsel *pos;
1601 	int idx2 = 0;
1602 
1603 	evlist__for_each_entry(evlist, pos) {
1604 		if (idx == idx2)
1605 			return pos;
1606 		idx2++;
1607 	}
1608 	return NULL;
1609 }
1610 
1611 static PyObject *pyrf_evlist__from_evlist(struct evlist *evlist)
1612 {
1613 	struct pyrf_evlist *pevlist = PyObject_New(struct pyrf_evlist, &pyrf_evlist__type);
1614 	struct evsel *pos;
1615 	struct rb_node *node;
1616 
1617 	if (!pevlist)
1618 		return NULL;
1619 
1620 	memset(&pevlist->evlist, 0, sizeof(pevlist->evlist));
1621 	evlist__init(&pevlist->evlist, evlist->core.all_cpus, evlist->core.threads);
1622 	evlist__for_each_entry(evlist, pos) {
1623 		struct pyrf_evsel *pevsel = (void *)pyrf_evsel__from_evsel(pos);
1624 
1625 		evlist__add(&pevlist->evlist, &pevsel->evsel);
1626 	}
1627 	evlist__for_each_entry(&pevlist->evlist, pos) {
1628 		struct evsel *leader = evsel__leader(pos);
1629 
1630 		if (pos != leader) {
1631 			int idx = evlist__pos(evlist, leader);
1632 
1633 			if (idx >= 0)
1634 				evsel__set_leader(pos, evlist__at(&pevlist->evlist, idx));
1635 			else if (leader == NULL)
1636 				evsel__set_leader(pos, pos);
1637 		}
1638 	}
1639 	metricgroup__copy_metric_events(&pevlist->evlist, /*cgrp=*/NULL,
1640 					&pevlist->evlist.metric_events,
1641 					&evlist->metric_events);
1642 	for (node = rb_first_cached(&pevlist->evlist.metric_events.entries); node;
1643 	     node = rb_next(node)) {
1644 		struct metric_event *me = container_of(node, struct metric_event, nd);
1645 		struct list_head *mpos;
1646 		int idx = evlist__pos(evlist, me->evsel);
1647 
1648 		if (idx >= 0)
1649 			me->evsel = evlist__at(&pevlist->evlist, idx);
1650 		list_for_each(mpos, &me->head) {
1651 			struct metric_expr *e = container_of(mpos, struct metric_expr, nd);
1652 
1653 			for (int j = 0; e->metric_events[j]; j++) {
1654 				idx = evlist__pos(evlist, e->metric_events[j]);
1655 				if (idx >= 0)
1656 					e->metric_events[j] = evlist__at(&pevlist->evlist, idx);
1657 			}
1658 		}
1659 	}
1660 	return (PyObject *)pevlist;
1661 }
1662 
1663 static PyObject *pyrf__parse_events(PyObject *self, PyObject *args)
1664 {
1665 	const char *input;
1666 	struct evlist evlist = {};
1667 	struct parse_events_error err;
1668 	PyObject *result;
1669 	PyObject *pcpus = NULL, *pthreads = NULL;
1670 	struct perf_cpu_map *cpus;
1671 	struct perf_thread_map *threads;
1672 
1673 	if (!PyArg_ParseTuple(args, "s|OO", &input, &pcpus, &pthreads))
1674 		return NULL;
1675 
1676 	threads = pthreads ? ((struct pyrf_thread_map *)pthreads)->threads : NULL;
1677 	cpus = pcpus ? ((struct pyrf_cpu_map *)pcpus)->cpus : NULL;
1678 
1679 	parse_events_error__init(&err);
1680 	evlist__init(&evlist, cpus, threads);
1681 	if (parse_events(&evlist, input, &err)) {
1682 		parse_events_error__print(&err, input);
1683 		PyErr_SetFromErrno(PyExc_OSError);
1684 		return NULL;
1685 	}
1686 	result = pyrf_evlist__from_evlist(&evlist);
1687 	evlist__exit(&evlist);
1688 	return result;
1689 }
1690 
1691 static PyMethodDef perf__methods[] = {
1692 	{
1693 		.ml_name  = "tracepoint",
1694 		.ml_meth  = (PyCFunction) pyrf__tracepoint,
1695 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
1696 		.ml_doc	  = PyDoc_STR("Get tracepoint config.")
1697 	},
1698 	{
1699 		.ml_name  = "parse_events",
1700 		.ml_meth  = (PyCFunction) pyrf__parse_events,
1701 		.ml_flags = METH_VARARGS,
1702 		.ml_doc	  = PyDoc_STR("Parse a string of events and return an evlist.")
1703 	},
1704 	{ .ml_name = NULL, }
1705 };
1706 
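/*
 * Editor's note: a hedged, minimal sketch of driving this module from Python,
 * built only from the types, methods and constants defined above; the event
 * choice, sample period and loop structure are illustrative assumptions, not
 * taken from this file:
 *
 *	import perf
 *
 *	cpus = perf.cpu_map()
 *	threads = perf.thread_map(-1, -1)
 *	evsel = perf.evsel(type=perf.TYPE_HARDWARE,
 *			   config=perf.COUNT_HW_CPU_CYCLES,
 *			   freq=0, sample_period=100000)
 *	evlist = perf.evlist(cpus, threads)
 *	evlist.add(evsel)
 *	evlist.open()
 *	evlist.mmap()
 *	evlist.enable()
 *	while True:
 *		evlist.poll(timeout=-1)
 *		for cpu in cpus:
 *			event = evlist.read_on_cpu(cpu)
 *			if event:
 *				print(event)
 */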
1707 PyMODINIT_FUNC PyInit_perf(void)
1708 {
1709 	PyObject *obj;
1710 	int i;
1711 	PyObject *dict;
1712 	static struct PyModuleDef moduledef = {
1713 		PyModuleDef_HEAD_INIT,
1714 		"perf",			/* m_name */
1715 		"",			/* m_doc */
1716 		-1,			/* m_size */
1717 		perf__methods,		/* m_methods */
1718 		NULL,			/* m_reload */
1719 		NULL,			/* m_traverse */
1720 		NULL,			/* m_clear */
1721 		NULL,			/* m_free */
1722 	};
1723 	PyObject *module = PyModule_Create(&moduledef);
1724 
1725 	if (module == NULL ||
1726 	    pyrf_event__setup_types() < 0 ||
1727 	    pyrf_evlist__setup_types() < 0 ||
1728 	    pyrf_evsel__setup_types() < 0 ||
1729 	    pyrf_thread_map__setup_types() < 0 ||
1730 	    pyrf_cpu_map__setup_types() < 0 ||
1731 	    pyrf_counts_values__setup_types() < 0)
1732 		return module;
1733 
1734 	/* The page_size is placed in util object. */
1735 	page_size = sysconf(_SC_PAGE_SIZE);
1736 
1737 	Py_INCREF(&pyrf_evlist__type);
1738 	PyModule_AddObject(module, "evlist", (PyObject*)&pyrf_evlist__type);
1739 
1740 	Py_INCREF(&pyrf_evsel__type);
1741 	PyModule_AddObject(module, "evsel", (PyObject*)&pyrf_evsel__type);
1742 
1743 	Py_INCREF(&pyrf_mmap_event__type);
1744 	PyModule_AddObject(module, "mmap_event", (PyObject *)&pyrf_mmap_event__type);
1745 
1746 	Py_INCREF(&pyrf_lost_event__type);
1747 	PyModule_AddObject(module, "lost_event", (PyObject *)&pyrf_lost_event__type);
1748 
1749 	Py_INCREF(&pyrf_comm_event__type);
1750 	PyModule_AddObject(module, "comm_event", (PyObject *)&pyrf_comm_event__type);
1751 
1752 	Py_INCREF(&pyrf_task_event__type);
1753 	PyModule_AddObject(module, "task_event", (PyObject *)&pyrf_task_event__type);
1754 
1755 	Py_INCREF(&pyrf_throttle_event__type);
1756 	PyModule_AddObject(module, "throttle_event", (PyObject *)&pyrf_throttle_event__type);
1757 
1758 	Py_INCREF(&pyrf_task_event__type);
1759 	PyModule_AddObject(module, "task_event", (PyObject *)&pyrf_task_event__type);
1760 
1761 	Py_INCREF(&pyrf_read_event__type);
1762 	PyModule_AddObject(module, "read_event", (PyObject *)&pyrf_read_event__type);
1763 
1764 	Py_INCREF(&pyrf_sample_event__type);
1765 	PyModule_AddObject(module, "sample_event", (PyObject *)&pyrf_sample_event__type);
1766 
1767 	Py_INCREF(&pyrf_context_switch_event__type);
1768 	PyModule_AddObject(module, "switch_event", (PyObject *)&pyrf_context_switch_event__type);
1769 
1770 	Py_INCREF(&pyrf_thread_map__type);
1771 	PyModule_AddObject(module, "thread_map", (PyObject*)&pyrf_thread_map__type);
1772 
1773 	Py_INCREF(&pyrf_cpu_map__type);
1774 	PyModule_AddObject(module, "cpu_map", (PyObject*)&pyrf_cpu_map__type);
1775 
1776 	Py_INCREF(&pyrf_counts_values__type);
1777 	PyModule_AddObject(module, "counts_values", (PyObject *)&pyrf_counts_values__type);
1778 
1779 	dict = PyModule_GetDict(module);
1780 	if (dict == NULL)
1781 		goto error;
1782 
1783 	for (i = 0; perf__constants[i].name != NULL; i++) {
1784 		obj = PyLong_FromLong(perf__constants[i].value);
1785 		if (obj == NULL)
1786 			goto error;
1787 		PyDict_SetItemString(dict, perf__constants[i].name, obj);
1788 		Py_DECREF(obj);
1789 	}
1790 
1791 error:
1792 	if (PyErr_Occurred())
1793 		PyErr_SetString(PyExc_ImportError, "perf: Init failed!");
1794 	return module;
1795 }
1796