// SPDX-License-Identifier: GPL-2.0
#include <Python.h>
#include <structmember.h>
#include <inttypes.h>
#include <poll.h>
#include <linux/err.h>
#include <perf/cpumap.h>
#ifdef HAVE_LIBTRACEEVENT
#include <traceevent/event-parse.h>
#endif
#include <perf/mmap.h>
#include "evlist.h"
#include "callchain.h"
#include "evsel.h"
#include "event.h"
#include "print_binary.h"
#include "thread_map.h"
#include "trace-event.h"
#include "mmap.h"
#include "stat.h"
#include "metricgroup.h"
#include "util/env.h"
#include "util/pmu.h"
#include <internal/lib.h>
#include "util.h"

#if PY_MAJOR_VERSION < 3
#define _PyUnicode_FromString(arg) \
  PyString_FromString(arg)
#define _PyUnicode_AsString(arg) \
  PyString_AsString(arg)
#define _PyUnicode_FromFormat(...) \
  PyString_FromFormat(__VA_ARGS__)
#define _PyLong_FromLong(arg) \
  PyInt_FromLong(arg)

#else

#define _PyUnicode_FromString(arg) \
  PyUnicode_FromString(arg)
#define _PyUnicode_FromFormat(...) \
  PyUnicode_FromFormat(__VA_ARGS__)
#define _PyLong_FromLong(arg) \
  PyLong_FromLong(arg)
#endif

#ifndef Py_TYPE
#define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
#endif

/*
 * Provide these two so that we don't have to link against callchain.c and
 * start dragging hist.c, etc.
 */
struct callchain_param callchain_param;

int parse_callchain_record(const char *arg __maybe_unused,
			   struct callchain_param *param __maybe_unused)
{
	return 0;
}

/*
 * Add these so that we don't have to drag in util/env.c.
 */
struct perf_env perf_env;

const char *perf_env__cpuid(struct perf_env *env __maybe_unused)
{
	return NULL;
}

// This one is a bit easier, it wouldn't drag in too much, but keep it as a stub since we need it here.
const char *perf_env__arch(struct perf_env *env __maybe_unused)
{
	return NULL;
}

/*
 * These stubs are needed so that we don't drag in the PMU bandwagon (the
 * jevents generated pmu_sys_event_tables, etc). So far evsel__find_pmu() is
 * used only for per-PMU perf_event_attr.exclude_guest handling, which the
 * known perf python binding use cases don't need; revisit if this becomes
 * necessary.
 */
struct perf_pmu *evsel__find_pmu(struct evsel *evsel __maybe_unused)
{
	return NULL;
}

int perf_pmu__scan_file(struct perf_pmu *pmu, const char *name, const char *fmt, ...)
{
	return EOF;
}

/*
 * Add this one here so as not to drag in util/metricgroup.c.
 */
int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
				    struct rblist *new_metric_events,
				    struct rblist *old_metric_events)
{
	return 0;
}

/*
 * XXX: All these evsel destructors need some better mechanism, like a linked
 * list of destructors registered when the relevant code is actually used,
 * instead of adding more and more calls to perf_evsel__delete(). -- acme
 *
 * For now, add some more:
 *
 * Not to drag in the BPF bandwagon...
 */
void bpf_counter__destroy(struct evsel *evsel);
int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd);
int bpf_counter__disable(struct evsel *evsel);

void bpf_counter__destroy(struct evsel *evsel __maybe_unused)
{
}

int bpf_counter__install_pe(struct evsel *evsel __maybe_unused, int cpu __maybe_unused, int fd __maybe_unused)
{
	return 0;
}

int bpf_counter__disable(struct evsel *evsel __maybe_unused)
{
	return 0;
}

/*
 * Support debug printing even though util/debug.c is not linked. That means
 * implementing 'verbose' and 'eprintf'.
 */
int verbose;
int debug_peo_args;

int eprintf(int level, int var, const char *fmt, ...);

int eprintf(int level, int var, const char *fmt, ...)
{
	va_list args;
	int ret = 0;

	if (var >= level) {
		va_start(args, fmt);
		ret = vfprintf(stderr, fmt, args);
		va_end(args);
	}

	return ret;
}

/* Define PyVarObject_HEAD_INIT for python 2.5 */
#ifndef PyVarObject_HEAD_INIT
# define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size,
#endif

#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC initperf(void);
#else
PyMODINIT_FUNC PyInit_perf(void);
#endif

#define member_def(type, member, ptype, help) \
	{ #member, ptype, \
	  offsetof(struct pyrf_event, event) + offsetof(struct type, member), \
	  0, help }

#define sample_member_def(name, member, ptype, help) \
	{ #name, ptype, \
	  offsetof(struct pyrf_event, sample) + offsetof(struct perf_sample, member), \
	  0, help }

struct pyrf_event {
	PyObject_HEAD
	struct evsel *evsel;
	struct perf_sample sample;
	union perf_event event;
};
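/*
 * member_def() exposes a field of the raw perf_event record copied into
 * pyrf_event.event, while sample_member_def() exposes a field of the parsed
 * perf_sample in pyrf_event.sample; both just compute the byte offset of the
 * field inside struct pyrf_event for a PyMemberDef entry. Every event type
 * below therefore gets the sample_* attributes plus its own record-specific
 * ones.
 */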
#define sample_members \
	sample_member_def(sample_ip, ip, T_ULONGLONG, "event ip"), \
	sample_member_def(sample_pid, pid, T_INT, "event pid"), \
	sample_member_def(sample_tid, tid, T_INT, "event tid"), \
	sample_member_def(sample_time, time, T_ULONGLONG, "event timestamp"), \
	sample_member_def(sample_addr, addr, T_ULONGLONG, "event addr"), \
	sample_member_def(sample_id, id, T_ULONGLONG, "event id"), \
	sample_member_def(sample_stream_id, stream_id, T_ULONGLONG, "event stream id"), \
	sample_member_def(sample_period, period, T_ULONGLONG, "event period"), \
	sample_member_def(sample_cpu, cpu, T_UINT, "event cpu"),

static char pyrf_mmap_event__doc[] = PyDoc_STR("perf mmap event object.");

static PyMemberDef pyrf_mmap_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_event_header, misc, T_UINT, "event misc"),
	member_def(perf_record_mmap, pid, T_UINT, "event pid"),
	member_def(perf_record_mmap, tid, T_UINT, "event tid"),
	member_def(perf_record_mmap, start, T_ULONGLONG, "start of the map"),
	member_def(perf_record_mmap, len, T_ULONGLONG, "map length"),
	member_def(perf_record_mmap, pgoff, T_ULONGLONG, "page offset"),
	member_def(perf_record_mmap, filename, T_STRING_INPLACE, "backing store"),
	{ .name = NULL, },
};

static PyObject *pyrf_mmap_event__repr(struct pyrf_event *pevent)
{
	PyObject *ret;
	char *s;

	if (asprintf(&s, "{ type: mmap, pid: %u, tid: %u, start: %#" PRI_lx64 ", "
			 "length: %#" PRI_lx64 ", offset: %#" PRI_lx64 ", "
			 "filename: %s }",
		     pevent->event.mmap.pid, pevent->event.mmap.tid,
		     pevent->event.mmap.start, pevent->event.mmap.len,
		     pevent->event.mmap.pgoff, pevent->event.mmap.filename) < 0) {
		ret = PyErr_NoMemory();
	} else {
		ret = _PyUnicode_FromString(s);
		free(s);
	}
	return ret;
}

static PyTypeObject pyrf_mmap_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.mmap_event",
	.tp_basicsize	= sizeof(struct pyrf_event),
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_mmap_event__doc,
	.tp_members	= pyrf_mmap_event__members,
	.tp_repr	= (reprfunc)pyrf_mmap_event__repr,
};

static char pyrf_task_event__doc[] = PyDoc_STR("perf task (fork/exit) event object.");

static PyMemberDef pyrf_task_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_fork, pid, T_UINT, "event pid"),
	member_def(perf_record_fork, ppid, T_UINT, "event ppid"),
	member_def(perf_record_fork, tid, T_UINT, "event tid"),
	member_def(perf_record_fork, ptid, T_UINT, "event ptid"),
	member_def(perf_record_fork, time, T_ULONGLONG, "timestamp"),
	{ .name = NULL, },
};

static PyObject *pyrf_task_event__repr(struct pyrf_event *pevent)
{
	return _PyUnicode_FromFormat("{ type: %s, pid: %u, ppid: %u, tid: %u, "
				     "ptid: %u, time: %" PRI_lu64 "}",
				     pevent->event.header.type == PERF_RECORD_FORK ? "fork" : "exit",
				     pevent->event.fork.pid,
				     pevent->event.fork.ppid,
				     pevent->event.fork.tid,
				     pevent->event.fork.ptid,
				     pevent->event.fork.time);
}

static PyTypeObject pyrf_task_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.task_event",
	.tp_basicsize	= sizeof(struct pyrf_event),
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_task_event__doc,
	.tp_members	= pyrf_task_event__members,
	.tp_repr	= (reprfunc)pyrf_task_event__repr,
};

static char pyrf_comm_event__doc[] = PyDoc_STR("perf comm event object.");

static PyMemberDef pyrf_comm_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_comm, pid, T_UINT, "event pid"),
	member_def(perf_record_comm, tid, T_UINT, "event tid"),
	member_def(perf_record_comm, comm, T_STRING_INPLACE, "process name"),
	{ .name = NULL, },
};

static PyObject *pyrf_comm_event__repr(struct pyrf_event *pevent)
{
	return _PyUnicode_FromFormat("{ type: comm, pid: %u, tid: %u, comm: %s }",
				     pevent->event.comm.pid,
				     pevent->event.comm.tid,
				     pevent->event.comm.comm);
}

static PyTypeObject pyrf_comm_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.comm_event",
	.tp_basicsize	= sizeof(struct pyrf_event),
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_comm_event__doc,
	.tp_members	= pyrf_comm_event__members,
	.tp_repr	= (reprfunc)pyrf_comm_event__repr,
};

static char pyrf_throttle_event__doc[] = PyDoc_STR("perf throttle event object.");

static PyMemberDef pyrf_throttle_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_throttle, time, T_ULONGLONG, "timestamp"),
	member_def(perf_record_throttle, id, T_ULONGLONG, "event id"),
	member_def(perf_record_throttle, stream_id, T_ULONGLONG, "event stream id"),
	{ .name = NULL, },
};

static PyObject *pyrf_throttle_event__repr(struct pyrf_event *pevent)
{
	struct perf_record_throttle *te = (struct perf_record_throttle *)(&pevent->event.header + 1);

	return _PyUnicode_FromFormat("{ type: %sthrottle, time: %" PRI_lu64 ", id: %" PRI_lu64
				     ", stream_id: %" PRI_lu64 " }",
				     pevent->event.header.type == PERF_RECORD_THROTTLE ? "" : "un",
				     te->time, te->id, te->stream_id);
}

static PyTypeObject pyrf_throttle_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.throttle_event",
	.tp_basicsize	= sizeof(struct pyrf_event),
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_throttle_event__doc,
	.tp_members	= pyrf_throttle_event__members,
	.tp_repr	= (reprfunc)pyrf_throttle_event__repr,
};
static char pyrf_lost_event__doc[] = PyDoc_STR("perf lost event object.");

static PyMemberDef pyrf_lost_event__members[] = {
	sample_members
	member_def(perf_record_lost, id, T_ULONGLONG, "event id"),
	member_def(perf_record_lost, lost, T_ULONGLONG, "number of lost events"),
	{ .name = NULL, },
};

static PyObject *pyrf_lost_event__repr(struct pyrf_event *pevent)
{
	PyObject *ret;
	char *s;

	if (asprintf(&s, "{ type: lost, id: %#" PRI_lx64 ", "
			 "lost: %#" PRI_lx64 " }",
		     pevent->event.lost.id, pevent->event.lost.lost) < 0) {
		ret = PyErr_NoMemory();
	} else {
		ret = _PyUnicode_FromString(s);
		free(s);
	}
	return ret;
}

static PyTypeObject pyrf_lost_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.lost_event",
	.tp_basicsize	= sizeof(struct pyrf_event),
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_lost_event__doc,
	.tp_members	= pyrf_lost_event__members,
	.tp_repr	= (reprfunc)pyrf_lost_event__repr,
};

static char pyrf_read_event__doc[] = PyDoc_STR("perf read event object.");

static PyMemberDef pyrf_read_event__members[] = {
	sample_members
	member_def(perf_record_read, pid, T_UINT, "event pid"),
	member_def(perf_record_read, tid, T_UINT, "event tid"),
	{ .name = NULL, },
};

static PyObject *pyrf_read_event__repr(struct pyrf_event *pevent)
{
	return _PyUnicode_FromFormat("{ type: read, pid: %u, tid: %u }",
				     pevent->event.read.pid,
				     pevent->event.read.tid);
	/*
	 * FIXME: return the array of read values,
	 * making this method useful ;-)
	 */
}

static PyTypeObject pyrf_read_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.read_event",
	.tp_basicsize	= sizeof(struct pyrf_event),
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_read_event__doc,
	.tp_members	= pyrf_read_event__members,
	.tp_repr	= (reprfunc)pyrf_read_event__repr,
};

static char pyrf_sample_event__doc[] = PyDoc_STR("perf sample event object.");

static PyMemberDef pyrf_sample_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	{ .name = NULL, },
};

static PyObject *pyrf_sample_event__repr(struct pyrf_event *pevent)
{
	PyObject *ret;
	char *s;

	if (asprintf(&s, "{ type: sample }") < 0) {
		ret = PyErr_NoMemory();
	} else {
		ret = _PyUnicode_FromString(s);
		free(s);
	}
	return ret;
}
#ifdef HAVE_LIBTRACEEVENT
static bool is_tracepoint(struct pyrf_event *pevent)
{
	return pevent->evsel->core.attr.type == PERF_TYPE_TRACEPOINT;
}

static PyObject*
tracepoint_field(struct pyrf_event *pe, struct tep_format_field *field)
{
	struct tep_handle *pevent = field->event->tep;
	void *data = pe->sample.raw_data;
	PyObject *ret = NULL;
	unsigned long long val;
	unsigned int offset, len;

	if (field->flags & TEP_FIELD_IS_ARRAY) {
		offset = field->offset;
		len    = field->size;
		if (field->flags & TEP_FIELD_IS_DYNAMIC) {
			val     = tep_read_number(pevent, data + offset, len);
			offset  = val;
			len     = offset >> 16;
			offset &= 0xffff;
			if (tep_field_is_relative(field->flags))
				offset += field->offset + field->size;
		}
		if (field->flags & TEP_FIELD_IS_STRING &&
		    is_printable_array(data + offset, len)) {
			ret = _PyUnicode_FromString((char *)data + offset);
		} else {
			ret = PyByteArray_FromStringAndSize((const char *) data + offset, len);
			field->flags &= ~TEP_FIELD_IS_STRING;
		}
	} else {
		val = tep_read_number(pevent, data + field->offset,
				      field->size);
		if (field->flags & TEP_FIELD_IS_POINTER)
			ret = PyLong_FromUnsignedLong((unsigned long) val);
		else if (field->flags & TEP_FIELD_IS_SIGNED)
			ret = PyLong_FromLong((long) val);
		else
			ret = PyLong_FromUnsignedLong((unsigned long) val);
	}

	return ret;
}

static PyObject*
get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name)
{
	const char *str = _PyUnicode_AsString(PyObject_Str(attr_name));
	struct evsel *evsel = pevent->evsel;
	struct tep_format_field *field;

	if (!evsel->tp_format) {
		struct tep_event *tp_format;

		tp_format = trace_event__tp_format_id(evsel->core.attr.config);
		if (IS_ERR_OR_NULL(tp_format))
			return NULL;

		evsel->tp_format = tp_format;
	}

	field = tep_find_any_field(evsel->tp_format, str);
	if (!field)
		return NULL;

	return tracepoint_field(pevent, field);
}
#endif /* HAVE_LIBTRACEEVENT */

static PyObject*
pyrf_sample_event__getattro(struct pyrf_event *pevent, PyObject *attr_name)
{
	PyObject *obj = NULL;

#ifdef HAVE_LIBTRACEEVENT
	if (is_tracepoint(pevent))
		obj = get_tracepoint_field(pevent, attr_name);
#endif

	return obj ?: PyObject_GenericGetAttr((PyObject *) pevent, attr_name);
}

static PyTypeObject pyrf_sample_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.sample_event",
	.tp_basicsize	= sizeof(struct pyrf_event),
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_sample_event__doc,
	.tp_members	= pyrf_sample_event__members,
	.tp_repr	= (reprfunc)pyrf_sample_event__repr,
	.tp_getattro	= (getattrofunc) pyrf_sample_event__getattro,
};
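/*
 * Illustrative example (documentation only, not code from this file): thanks
 * to the getattro hook above, a sample for a tracepoint evsel exposes its
 * format fields as plain attributes in Python, e.g. for sched:sched_switch
 * something like:
 *
 *	event = evlist.read_on_cpu(cpu)
 *	print(event.prev_comm, event.prev_pid, event.next_comm, event.next_pid)
 *
 * The field names come from the tracepoint's format description and thus vary
 * per tracepoint; names that aren't format fields fall back to the generic
 * attribute lookup.
 */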
static char pyrf_context_switch_event__doc[] = PyDoc_STR("perf context_switch event object.");

static PyMemberDef pyrf_context_switch_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_switch, next_prev_pid, T_UINT, "next/prev pid"),
	member_def(perf_record_switch, next_prev_tid, T_UINT, "next/prev tid"),
	{ .name = NULL, },
};

static PyObject *pyrf_context_switch_event__repr(struct pyrf_event *pevent)
{
	PyObject *ret;
	char *s;

	if (asprintf(&s, "{ type: context_switch, next_prev_pid: %u, next_prev_tid: %u, switch_out: %u }",
		     pevent->event.context_switch.next_prev_pid,
		     pevent->event.context_switch.next_prev_tid,
		     !!(pevent->event.header.misc & PERF_RECORD_MISC_SWITCH_OUT)) < 0) {
		ret = PyErr_NoMemory();
	} else {
		ret = _PyUnicode_FromString(s);
		free(s);
	}
	return ret;
}

static PyTypeObject pyrf_context_switch_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.context_switch_event",
	.tp_basicsize	= sizeof(struct pyrf_event),
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_context_switch_event__doc,
	.tp_members	= pyrf_context_switch_event__members,
	.tp_repr	= (reprfunc)pyrf_context_switch_event__repr,
};

static int pyrf_event__setup_types(void)
{
	int err;
	pyrf_mmap_event__type.tp_new =
	pyrf_task_event__type.tp_new =
	pyrf_comm_event__type.tp_new =
	pyrf_lost_event__type.tp_new =
	pyrf_read_event__type.tp_new =
	pyrf_sample_event__type.tp_new =
	pyrf_context_switch_event__type.tp_new =
	pyrf_throttle_event__type.tp_new = PyType_GenericNew;
	err = PyType_Ready(&pyrf_mmap_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_lost_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_task_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_comm_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_throttle_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_read_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_sample_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_context_switch_event__type);
	if (err < 0)
		goto out;
out:
	return err;
}

static PyTypeObject *pyrf_event__type[] = {
	[PERF_RECORD_MMAP]		= &pyrf_mmap_event__type,
	[PERF_RECORD_LOST]		= &pyrf_lost_event__type,
	[PERF_RECORD_COMM]		= &pyrf_comm_event__type,
	[PERF_RECORD_EXIT]		= &pyrf_task_event__type,
	[PERF_RECORD_THROTTLE]		= &pyrf_throttle_event__type,
	[PERF_RECORD_UNTHROTTLE]	= &pyrf_throttle_event__type,
	[PERF_RECORD_FORK]		= &pyrf_task_event__type,
	[PERF_RECORD_READ]		= &pyrf_read_event__type,
	[PERF_RECORD_SAMPLE]		= &pyrf_sample_event__type,
	[PERF_RECORD_SWITCH]		= &pyrf_context_switch_event__type,
	[PERF_RECORD_SWITCH_CPU_WIDE]	= &pyrf_context_switch_event__type,
};

static PyObject *pyrf_event__new(union perf_event *event)
{
	struct pyrf_event *pevent;
	PyTypeObject *ptype;

	if ((event->header.type < PERF_RECORD_MMAP ||
	     event->header.type > PERF_RECORD_SAMPLE) &&
	    !(event->header.type == PERF_RECORD_SWITCH ||
	      event->header.type == PERF_RECORD_SWITCH_CPU_WIDE))
		return NULL;

	ptype = pyrf_event__type[event->header.type];
	pevent = PyObject_New(struct pyrf_event, ptype);
	if (pevent != NULL)
		memcpy(&pevent->event, event, event->header.size);
	return (PyObject *)pevent;
}

struct pyrf_cpu_map {
	PyObject_HEAD

	struct perf_cpu_map *cpus;
};

static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus,
			      PyObject *args, PyObject *kwargs)
{
	static char *kwlist[] = { "cpustr", NULL };
	char *cpustr = NULL;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|s",
					 kwlist, &cpustr))
		return -1;

	pcpus->cpus = perf_cpu_map__new(cpustr);
	if (pcpus->cpus == NULL)
		return -1;
	return 0;
}

static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus)
{
	perf_cpu_map__put(pcpus->cpus);
	Py_TYPE(pcpus)->tp_free((PyObject*)pcpus);
}

static Py_ssize_t pyrf_cpu_map__length(PyObject *obj)
{
	struct pyrf_cpu_map *pcpus = (void *)obj;

	return perf_cpu_map__nr(pcpus->cpus);
}

static PyObject *pyrf_cpu_map__item(PyObject *obj, Py_ssize_t i)
{
	struct pyrf_cpu_map *pcpus = (void *)obj;

	if (i >= perf_cpu_map__nr(pcpus->cpus))
		return NULL;

	return Py_BuildValue("i", perf_cpu_map__cpu(pcpus->cpus, i).cpu);
}

static PySequenceMethods pyrf_cpu_map__sequence_methods = {
	.sq_length = pyrf_cpu_map__length,
	.sq_item   = pyrf_cpu_map__item,
};

static char pyrf_cpu_map__doc[] = PyDoc_STR("cpu map object.");

static PyTypeObject pyrf_cpu_map__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.cpu_map",
	.tp_basicsize	= sizeof(struct pyrf_cpu_map),
	.tp_dealloc	= (destructor)pyrf_cpu_map__delete,
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_cpu_map__doc,
	.tp_as_sequence	= &pyrf_cpu_map__sequence_methods,
	.tp_init	= (initproc)pyrf_cpu_map__init,
};

static int pyrf_cpu_map__setup_types(void)
{
	pyrf_cpu_map__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_cpu_map__type);
}
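/*
 * Illustrative example (documentation only, not code from this file): from
 * Python, perf.cpu_map() wraps a struct perf_cpu_map and behaves like a
 * read-only sequence of CPU numbers, and perf.thread_map() (defined next)
 * does the same for threads:
 *
 *	import perf
 *	cpus = perf.cpu_map("0-3")		# or perf.cpu_map() for all online CPUs
 *	threads = perf.thread_map(pid = 1234)	# or perf.thread_map() for a
 *						# single -1 "any thread" entry
 *	print(len(cpus), cpus[0], len(threads), threads[0])
 *
 * The actual contents depend on the arguments and on the running system.
 */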
struct pyrf_thread_map {
	PyObject_HEAD

	struct perf_thread_map *threads;
};

static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads,
				 PyObject *args, PyObject *kwargs)
{
	static char *kwlist[] = { "pid", "tid", "uid", NULL };
	int pid = -1, tid = -1, uid = UINT_MAX;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iii",
					 kwlist, &pid, &tid, &uid))
		return -1;

	pthreads->threads = thread_map__new(pid, tid, uid);
	if (pthreads->threads == NULL)
		return -1;
	return 0;
}

static void pyrf_thread_map__delete(struct pyrf_thread_map *pthreads)
{
	perf_thread_map__put(pthreads->threads);
	Py_TYPE(pthreads)->tp_free((PyObject*)pthreads);
}

static Py_ssize_t pyrf_thread_map__length(PyObject *obj)
{
	struct pyrf_thread_map *pthreads = (void *)obj;

	return perf_thread_map__nr(pthreads->threads);
}

static PyObject *pyrf_thread_map__item(PyObject *obj, Py_ssize_t i)
{
	struct pyrf_thread_map *pthreads = (void *)obj;

	if (i >= perf_thread_map__nr(pthreads->threads))
		return NULL;

	return Py_BuildValue("i", perf_thread_map__pid(pthreads->threads, i));
}

static PySequenceMethods pyrf_thread_map__sequence_methods = {
	.sq_length = pyrf_thread_map__length,
	.sq_item   = pyrf_thread_map__item,
};

static char pyrf_thread_map__doc[] = PyDoc_STR("thread map object.");

static PyTypeObject pyrf_thread_map__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.thread_map",
	.tp_basicsize	= sizeof(struct pyrf_thread_map),
	.tp_dealloc	= (destructor)pyrf_thread_map__delete,
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_thread_map__doc,
	.tp_as_sequence	= &pyrf_thread_map__sequence_methods,
	.tp_init	= (initproc)pyrf_thread_map__init,
};

static int pyrf_thread_map__setup_types(void)
{
	pyrf_thread_map__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_thread_map__type);
}
struct pyrf_evsel {
	PyObject_HEAD

	struct evsel evsel;
};

static int pyrf_evsel__init(struct pyrf_evsel *pevsel,
			    PyObject *args, PyObject *kwargs)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
		.sample_type = PERF_SAMPLE_PERIOD | PERF_SAMPLE_TID,
	};
	static char *kwlist[] = {
		"type",
		"config",
		"sample_freq",
		"sample_period",
		"sample_type",
		"read_format",
		"disabled",
		"inherit",
		"pinned",
		"exclusive",
		"exclude_user",
		"exclude_kernel",
		"exclude_hv",
		"exclude_idle",
		"mmap",
		"context_switch",
		"comm",
		"freq",
		"inherit_stat",
		"enable_on_exec",
		"task",
		"watermark",
		"precise_ip",
		"mmap_data",
		"sample_id_all",
		"wakeup_events",
		"bp_type",
		"bp_addr",
		"bp_len",
		NULL
	};
	u64 sample_period = 0;
	u32 disabled = 0,
	    inherit = 0,
	    pinned = 0,
	    exclusive = 0,
	    exclude_user = 0,
	    exclude_kernel = 0,
	    exclude_hv = 0,
	    exclude_idle = 0,
	    mmap = 0,
	    context_switch = 0,
	    comm = 0,
	    freq = 1,
	    inherit_stat = 0,
	    enable_on_exec = 0,
	    task = 0,
	    watermark = 0,
	    precise_ip = 0,
	    mmap_data = 0,
	    sample_id_all = 1;
	int idx = 0;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs,
					 "|iKiKKiiiiiiiiiiiiiiiiiiiiiiKK", kwlist,
					 &attr.type, &attr.config, &attr.sample_freq,
					 &sample_period, &attr.sample_type,
					 &attr.read_format, &disabled, &inherit,
					 &pinned, &exclusive, &exclude_user,
					 &exclude_kernel, &exclude_hv, &exclude_idle,
					 &mmap, &context_switch, &comm, &freq, &inherit_stat,
					 &enable_on_exec, &task, &watermark,
					 &precise_ip, &mmap_data, &sample_id_all,
					 &attr.wakeup_events, &attr.bp_type,
					 &attr.bp_addr, &attr.bp_len, &idx))
		return -1;

	/* union... */
	if (sample_period != 0) {
		if (attr.sample_freq != 0)
			return -1; /* FIXME: throw right exception */
		attr.sample_period = sample_period;
	}

	/* Bitfields */
	attr.disabled	    = disabled;
	attr.inherit	    = inherit;
	attr.pinned	    = pinned;
	attr.exclusive	    = exclusive;
	attr.exclude_user   = exclude_user;
	attr.exclude_kernel = exclude_kernel;
	attr.exclude_hv	    = exclude_hv;
	attr.exclude_idle   = exclude_idle;
	attr.mmap	    = mmap;
	attr.context_switch = context_switch;
	attr.comm	    = comm;
	attr.freq	    = freq;
	attr.inherit_stat   = inherit_stat;
	attr.enable_on_exec = enable_on_exec;
	attr.task	    = task;
	attr.watermark	    = watermark;
	attr.precise_ip	    = precise_ip;
	attr.mmap_data	    = mmap_data;
	attr.sample_id_all  = sample_id_all;
	attr.size	    = sizeof(attr);

	evsel__init(&pevsel->evsel, &attr, idx);
	return 0;
}

static void pyrf_evsel__delete(struct pyrf_evsel *pevsel)
{
	evsel__exit(&pevsel->evsel);
	Py_TYPE(pevsel)->tp_free((PyObject*)pevsel);
}

static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
				  PyObject *args, PyObject *kwargs)
{
	struct evsel *evsel = &pevsel->evsel;
	struct perf_cpu_map *cpus = NULL;
	struct perf_thread_map *threads = NULL;
	PyObject *pcpus = NULL, *pthreads = NULL;
	int group = 0, inherit = 0;
	static char *kwlist[] = { "cpus", "threads", "group", "inherit", NULL };

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist,
					 &pcpus, &pthreads, &group, &inherit))
		return NULL;

	if (pthreads != NULL)
		threads = ((struct pyrf_thread_map *)pthreads)->threads;

	if (pcpus != NULL)
		cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;

	evsel->core.attr.inherit = inherit;
	/*
	 * This will group just the fds for this single evsel; to group
	 * multiple events, use evlist.open().
	 */
	if (evsel__open(evsel, cpus, threads) < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	Py_INCREF(Py_None);
	return Py_None;
}

static PyMethodDef pyrf_evsel__methods[] = {
	{
		.ml_name  = "open",
		.ml_meth  = (PyCFunction)pyrf_evsel__open,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("open the event selector file descriptor table.")
	},
	{ .ml_name = NULL, }
};

static char pyrf_evsel__doc[] = PyDoc_STR("perf event selector object.");

static PyTypeObject pyrf_evsel__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.evsel",
	.tp_basicsize	= sizeof(struct pyrf_evsel),
	.tp_dealloc	= (destructor)pyrf_evsel__delete,
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_evsel__doc,
	.tp_methods	= pyrf_evsel__methods,
	.tp_init	= (initproc)pyrf_evsel__init,
};

static int pyrf_evsel__setup_types(void)
{
	pyrf_evsel__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_evsel__type);
}
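/*
 * Illustrative example (documentation only, not code from this file): an
 * evsel is built from perf_event_attr-like keyword arguments matching the
 * kwlist above and is then opened on a cpu map / thread map pair, roughly:
 *
 *	evsel = perf.evsel(type = perf.TYPE_SOFTWARE,
 *			   config = perf.COUNT_SW_DUMMY,
 *			   task = 1, comm = 1, freq = 0,
 *			   wakeup_events = 1, watermark = 1,
 *			   sample_id_all = 1,
 *			   sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
 *	evsel.open(cpus = cpus, threads = threads)
 */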
struct pyrf_evlist {
	PyObject_HEAD

	struct evlist evlist;
};

static int pyrf_evlist__init(struct pyrf_evlist *pevlist,
			     PyObject *args, PyObject *kwargs __maybe_unused)
{
	PyObject *pcpus = NULL, *pthreads = NULL;
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;

	if (!PyArg_ParseTuple(args, "OO", &pcpus, &pthreads))
		return -1;

	threads = ((struct pyrf_thread_map *)pthreads)->threads;
	cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
	evlist__init(&pevlist->evlist, cpus, threads);
	return 0;
}

static void pyrf_evlist__delete(struct pyrf_evlist *pevlist)
{
	evlist__exit(&pevlist->evlist);
	Py_TYPE(pevlist)->tp_free((PyObject*)pevlist);
}

static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist,
				   PyObject *args, PyObject *kwargs)
{
	struct evlist *evlist = &pevlist->evlist;
	static char *kwlist[] = { "pages", "overwrite", NULL };
	int pages = 128, overwrite = false;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", kwlist,
					 &pages, &overwrite))
		return NULL;

	if (evlist__mmap(evlist, pages) < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	Py_INCREF(Py_None);
	return Py_None;
}

static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist,
				   PyObject *args, PyObject *kwargs)
{
	struct evlist *evlist = &pevlist->evlist;
	static char *kwlist[] = { "timeout", NULL };
	int timeout = -1, n;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout))
		return NULL;

	n = evlist__poll(evlist, timeout);
	if (n < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	return Py_BuildValue("i", n);
}

static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
					 PyObject *args __maybe_unused,
					 PyObject *kwargs __maybe_unused)
{
	struct evlist *evlist = &pevlist->evlist;
	PyObject *list = PyList_New(0);
	int i;

	for (i = 0; i < evlist->core.pollfd.nr; ++i) {
		PyObject *file;
#if PY_MAJOR_VERSION < 3
		FILE *fp = fdopen(evlist->core.pollfd.entries[i].fd, "r");

		if (fp == NULL)
			goto free_list;

		file = PyFile_FromFile(fp, "perf", "r", NULL);
#else
		file = PyFile_FromFd(evlist->core.pollfd.entries[i].fd, "perf", "r", -1,
				     NULL, NULL, NULL, 0);
#endif
		if (file == NULL)
			goto free_list;

		if (PyList_Append(list, file) != 0) {
			Py_DECREF(file);
			goto free_list;
		}

		Py_DECREF(file);
	}

	return list;
free_list:
	return PyErr_NoMemory();
}


static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist,
				  PyObject *args,
				  PyObject *kwargs __maybe_unused)
{
	struct evlist *evlist = &pevlist->evlist;
	PyObject *pevsel;
	struct evsel *evsel;

	if (!PyArg_ParseTuple(args, "O", &pevsel))
		return NULL;

	Py_INCREF(pevsel);
	evsel = &((struct pyrf_evsel *)pevsel)->evsel;
	evsel->core.idx = evlist->core.nr_entries;
	evlist__add(evlist, evsel);

	return Py_BuildValue("i", evlist->core.nr_entries);
}

static struct mmap *get_md(struct evlist *evlist, int cpu)
{
	int i;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		struct mmap *md = &evlist->mmap[i];

		if (md->core.cpu.cpu == cpu)
			return md;
	}

	return NULL;
}

static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
					  PyObject *args, PyObject *kwargs)
{
	struct evlist *evlist = &pevlist->evlist;
	union perf_event *event;
	int sample_id_all = 1, cpu;
	static char *kwlist[] = { "cpu", "sample_id_all", NULL };
	struct mmap *md;
	int err;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist,
					 &cpu, &sample_id_all))
		return NULL;

	md = get_md(evlist, cpu);
	if (!md)
		return NULL;

	if (perf_mmap__read_init(&md->core) < 0)
		goto end;

	event = perf_mmap__read_event(&md->core);
	if (event != NULL) {
		PyObject *pyevent = pyrf_event__new(event);
		struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
		struct evsel *evsel;

		if (pyevent == NULL)
			return PyErr_NoMemory();

		evsel = evlist__event2evsel(evlist, event);
		if (!evsel) {
			Py_INCREF(Py_None);
			return Py_None;
		}

		pevent->evsel = evsel;

		err = evsel__parse_sample(evsel, event, &pevent->sample);

		/* Consume the event only after we parsed it out. */
		perf_mmap__consume(&md->core);

		if (err)
			return PyErr_Format(PyExc_OSError,
					    "perf: can't parse sample, err=%d", err);
		return pyevent;
	}
end:
	Py_INCREF(Py_None);
	return Py_None;
}

static PyObject *pyrf_evlist__open(struct pyrf_evlist *pevlist,
				   PyObject *args, PyObject *kwargs)
{
	struct evlist *evlist = &pevlist->evlist;

	if (evlist__open(evlist) < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	Py_INCREF(Py_None);
	return Py_None;
}

static PyMethodDef pyrf_evlist__methods[] = {
	{
		.ml_name  = "mmap",
		.ml_meth  = (PyCFunction)pyrf_evlist__mmap,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("mmap the file descriptor table.")
	},
	{
		.ml_name  = "open",
		.ml_meth  = (PyCFunction)pyrf_evlist__open,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("open the file descriptors.")
	},
	{
		.ml_name  = "poll",
		.ml_meth  = (PyCFunction)pyrf_evlist__poll,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("poll the file descriptor table.")
	},
	{
		.ml_name  = "get_pollfd",
		.ml_meth  = (PyCFunction)pyrf_evlist__get_pollfd,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("get the poll file descriptor table.")
	},
	{
		.ml_name  = "add",
		.ml_meth  = (PyCFunction)pyrf_evlist__add,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("adds an event selector to the list.")
	},
	{
		.ml_name  = "read_on_cpu",
		.ml_meth  = (PyCFunction)pyrf_evlist__read_on_cpu,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("reads an event.")
	},
	{ .ml_name = NULL, }
};

static Py_ssize_t pyrf_evlist__length(PyObject *obj)
{
	struct pyrf_evlist *pevlist = (void *)obj;

	return pevlist->evlist.core.nr_entries;
}

static PyObject *pyrf_evlist__item(PyObject *obj, Py_ssize_t i)
{
	struct pyrf_evlist *pevlist = (void *)obj;
	struct evsel *pos;

	if (i >= pevlist->evlist.core.nr_entries)
		return NULL;

	evlist__for_each_entry(&pevlist->evlist, pos) {
		if (i-- == 0)
			break;
	}

	return Py_BuildValue("O", container_of(pos, struct pyrf_evsel, evsel));
}

static PySequenceMethods pyrf_evlist__sequence_methods = {
	.sq_length = pyrf_evlist__length,
	.sq_item   = pyrf_evlist__item,
};

static char pyrf_evlist__doc[] = PyDoc_STR("perf event selector list object.");

static PyTypeObject pyrf_evlist__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.evlist",
	.tp_basicsize	= sizeof(struct pyrf_evlist),
	.tp_dealloc	= (destructor)pyrf_evlist__delete,
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_as_sequence	= &pyrf_evlist__sequence_methods,
	.tp_doc		= pyrf_evlist__doc,
	.tp_methods	= pyrf_evlist__methods,
	.tp_init	= (initproc)pyrf_evlist__init,
};

static int pyrf_evlist__setup_types(void)
{
	pyrf_evlist__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_evlist__type);
}
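/*
 * Illustrative example (documentation only, not code from this file): a usual
 * consumer loop ties the objects above together roughly like this, once the
 * evsel(s) have been opened (via evsel.open() or evlist.open()):
 *
 *	evlist = perf.evlist(cpus, threads)
 *	evlist.add(evsel)
 *	evlist.mmap()
 *	while True:
 *		evlist.poll(timeout = -1)
 *		for cpu in cpus:
 *			event = evlist.read_on_cpu(cpu)
 *			if not event:
 *				continue
 *			print(event)	# uses the tp_repr of the pyrf_event subtype
 *
 * read_on_cpu() returns None when the ring buffer for that CPU is empty, and
 * it parses the sample before consuming it, so the sample_* members (and, for
 * tracepoints, the format fields) are available on the returned object.
 */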
#define PERF_CONST(name) { #name, PERF_##name }

static struct {
	const char *name;
	int	    value;
} perf__constants[] = {
	PERF_CONST(TYPE_HARDWARE),
	PERF_CONST(TYPE_SOFTWARE),
	PERF_CONST(TYPE_TRACEPOINT),
	PERF_CONST(TYPE_HW_CACHE),
	PERF_CONST(TYPE_RAW),
	PERF_CONST(TYPE_BREAKPOINT),

	PERF_CONST(COUNT_HW_CPU_CYCLES),
	PERF_CONST(COUNT_HW_INSTRUCTIONS),
	PERF_CONST(COUNT_HW_CACHE_REFERENCES),
	PERF_CONST(COUNT_HW_CACHE_MISSES),
	PERF_CONST(COUNT_HW_BRANCH_INSTRUCTIONS),
	PERF_CONST(COUNT_HW_BRANCH_MISSES),
	PERF_CONST(COUNT_HW_BUS_CYCLES),
	PERF_CONST(COUNT_HW_CACHE_L1D),
	PERF_CONST(COUNT_HW_CACHE_L1I),
	PERF_CONST(COUNT_HW_CACHE_LL),
	PERF_CONST(COUNT_HW_CACHE_DTLB),
	PERF_CONST(COUNT_HW_CACHE_ITLB),
	PERF_CONST(COUNT_HW_CACHE_BPU),
	PERF_CONST(COUNT_HW_CACHE_OP_READ),
	PERF_CONST(COUNT_HW_CACHE_OP_WRITE),
	PERF_CONST(COUNT_HW_CACHE_OP_PREFETCH),
	PERF_CONST(COUNT_HW_CACHE_RESULT_ACCESS),
	PERF_CONST(COUNT_HW_CACHE_RESULT_MISS),

	PERF_CONST(COUNT_HW_STALLED_CYCLES_FRONTEND),
	PERF_CONST(COUNT_HW_STALLED_CYCLES_BACKEND),

	PERF_CONST(COUNT_SW_CPU_CLOCK),
	PERF_CONST(COUNT_SW_TASK_CLOCK),
	PERF_CONST(COUNT_SW_PAGE_FAULTS),
	PERF_CONST(COUNT_SW_CONTEXT_SWITCHES),
	PERF_CONST(COUNT_SW_CPU_MIGRATIONS),
	PERF_CONST(COUNT_SW_PAGE_FAULTS_MIN),
	PERF_CONST(COUNT_SW_PAGE_FAULTS_MAJ),
	PERF_CONST(COUNT_SW_ALIGNMENT_FAULTS),
	PERF_CONST(COUNT_SW_EMULATION_FAULTS),
	PERF_CONST(COUNT_SW_DUMMY),

	PERF_CONST(SAMPLE_IP),
	PERF_CONST(SAMPLE_TID),
	PERF_CONST(SAMPLE_TIME),
	PERF_CONST(SAMPLE_ADDR),
	PERF_CONST(SAMPLE_READ),
	PERF_CONST(SAMPLE_CALLCHAIN),
	PERF_CONST(SAMPLE_ID),
	PERF_CONST(SAMPLE_CPU),
	PERF_CONST(SAMPLE_PERIOD),
	PERF_CONST(SAMPLE_STREAM_ID),
	PERF_CONST(SAMPLE_RAW),

	PERF_CONST(FORMAT_TOTAL_TIME_ENABLED),
	PERF_CONST(FORMAT_TOTAL_TIME_RUNNING),
	PERF_CONST(FORMAT_ID),
	PERF_CONST(FORMAT_GROUP),

	PERF_CONST(RECORD_MMAP),
	PERF_CONST(RECORD_LOST),
	PERF_CONST(RECORD_COMM),
	PERF_CONST(RECORD_EXIT),
	PERF_CONST(RECORD_THROTTLE),
	PERF_CONST(RECORD_UNTHROTTLE),
	PERF_CONST(RECORD_FORK),
	PERF_CONST(RECORD_READ),
	PERF_CONST(RECORD_SAMPLE),
	PERF_CONST(RECORD_MMAP2),
	PERF_CONST(RECORD_AUX),
	PERF_CONST(RECORD_ITRACE_START),
	PERF_CONST(RECORD_LOST_SAMPLES),
	PERF_CONST(RECORD_SWITCH),
	PERF_CONST(RECORD_SWITCH_CPU_WIDE),

	PERF_CONST(RECORD_MISC_SWITCH_OUT),
	{ .name = NULL, },
};
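/*
 * The table above is walked at module init time: each entry becomes a
 * module-level integer attribute named after the constant with the PERF_
 * prefix stripped, so perf.TYPE_HARDWARE, perf.COUNT_HW_CPU_CYCLES,
 * perf.RECORD_SAMPLE, etc. carry the same values as their PERF_* counterparts
 * in the perf_event UAPI.
 */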
static PyObject *pyrf__tracepoint(struct pyrf_evsel *pevsel,
				  PyObject *args, PyObject *kwargs)
{
#ifndef HAVE_LIBTRACEEVENT
	return NULL;
#else
	struct tep_event *tp_format;
	static char *kwlist[] = { "sys", "name", NULL };
	char *sys  = NULL;
	char *name = NULL;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ss", kwlist,
					 &sys, &name))
		return NULL;

	tp_format = trace_event__tp_format(sys, name);
	if (IS_ERR(tp_format))
		return _PyLong_FromLong(-1);

	return _PyLong_FromLong(tp_format->id);
#endif // HAVE_LIBTRACEEVENT
}

static PyMethodDef perf__methods[] = {
	{
		.ml_name  = "tracepoint",
		.ml_meth  = (PyCFunction) pyrf__tracepoint,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("Get tracepoint config.")
	},
	{ .ml_name = NULL, }
};
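/*
 * Illustrative example (documentation only, not code from this file):
 * perf.tracepoint() resolves a tracepoint to its id, which can then be used
 * as the config of a PERF_TYPE_TRACEPOINT evsel:
 *
 *	config = perf.tracepoint(sys = "sched", name = "sched_switch")
 *	evsel = perf.evsel(type = perf.TYPE_TRACEPOINT, config = config,
 *			   sample_period = 1,
 *			   sample_type = perf.SAMPLE_RAW | perf.SAMPLE_TID | perf.SAMPLE_CPU)
 *
 * On lookup failure it returns -1; in builds without libtraceevent the method
 * is not usable (the function above just returns NULL).
 */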
#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC initperf(void)
#else
PyMODINIT_FUNC PyInit_perf(void)
#endif
{
	PyObject *obj;
	int i;
	PyObject *dict;
#if PY_MAJOR_VERSION < 3
	PyObject *module = Py_InitModule("perf", perf__methods);
#else
	static struct PyModuleDef moduledef = {
		PyModuleDef_HEAD_INIT,
		"perf",			/* m_name */
		"",			/* m_doc */
		-1,			/* m_size */
		perf__methods,		/* m_methods */
		NULL,			/* m_reload */
		NULL,			/* m_traverse */
		NULL,			/* m_clear */
		NULL,			/* m_free */
	};
	PyObject *module = PyModule_Create(&moduledef);
#endif

	if (module == NULL ||
	    pyrf_event__setup_types() < 0 ||
	    pyrf_evlist__setup_types() < 0 ||
	    pyrf_evsel__setup_types() < 0 ||
	    pyrf_thread_map__setup_types() < 0 ||
	    pyrf_cpu_map__setup_types() < 0)
#if PY_MAJOR_VERSION < 3
		return;
#else
		return module;
#endif

	/* The page_size is placed in util object. */
	page_size = sysconf(_SC_PAGE_SIZE);

	Py_INCREF(&pyrf_evlist__type);
	PyModule_AddObject(module, "evlist", (PyObject*)&pyrf_evlist__type);

	Py_INCREF(&pyrf_evsel__type);
	PyModule_AddObject(module, "evsel", (PyObject*)&pyrf_evsel__type);

	Py_INCREF(&pyrf_mmap_event__type);
	PyModule_AddObject(module, "mmap_event", (PyObject *)&pyrf_mmap_event__type);

	Py_INCREF(&pyrf_lost_event__type);
	PyModule_AddObject(module, "lost_event", (PyObject *)&pyrf_lost_event__type);

	Py_INCREF(&pyrf_comm_event__type);
	PyModule_AddObject(module, "comm_event", (PyObject *)&pyrf_comm_event__type);

	Py_INCREF(&pyrf_task_event__type);
	PyModule_AddObject(module, "task_event", (PyObject *)&pyrf_task_event__type);

	Py_INCREF(&pyrf_throttle_event__type);
	PyModule_AddObject(module, "throttle_event", (PyObject *)&pyrf_throttle_event__type);

	Py_INCREF(&pyrf_task_event__type);
	PyModule_AddObject(module, "task_event", (PyObject *)&pyrf_task_event__type);

	Py_INCREF(&pyrf_read_event__type);
	PyModule_AddObject(module, "read_event", (PyObject *)&pyrf_read_event__type);

	Py_INCREF(&pyrf_sample_event__type);
	PyModule_AddObject(module, "sample_event", (PyObject *)&pyrf_sample_event__type);

	Py_INCREF(&pyrf_context_switch_event__type);
	PyModule_AddObject(module, "switch_event", (PyObject *)&pyrf_context_switch_event__type);

	Py_INCREF(&pyrf_thread_map__type);
	PyModule_AddObject(module, "thread_map", (PyObject*)&pyrf_thread_map__type);

	Py_INCREF(&pyrf_cpu_map__type);
	PyModule_AddObject(module, "cpu_map", (PyObject*)&pyrf_cpu_map__type);

	dict = PyModule_GetDict(module);
	if (dict == NULL)
		goto error;

	for (i = 0; perf__constants[i].name != NULL; i++) {
		obj = _PyLong_FromLong(perf__constants[i].value);
		if (obj == NULL)
			goto error;
		PyDict_SetItemString(dict, perf__constants[i].name, obj);
		Py_DECREF(obj);
	}

error:
	if (PyErr_Occurred())
		PyErr_SetString(PyExc_ImportError, "perf: Init failed!");
#if PY_MAJOR_VERSION >= 3
	return module;
#endif
}

/*
 * Dummy, to avoid dragging all the test_attr infrastructure in the python
 * binding.
 */
void test_attr__open(struct perf_event_attr *attr, pid_t pid, struct perf_cpu cpu,
		     int fd, int group_fd, unsigned long flags)
{
}
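/*
 * Illustrative pointer (documentation only, not code from this file): a
 * complete consumer of this module, along the lines of the examples in the
 * comments above, ships with the perf sources, e.g.
 * tools/perf/python/twatch.py.
 */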