// SPDX-License-Identifier: GPL-2.0
#include <Python.h>
#include <structmember.h>
#include <inttypes.h>
#include <poll.h>
#include <linux/err.h>
#include <perf/cpumap.h>
#ifdef HAVE_LIBTRACEEVENT
#include <traceevent/event-parse.h>
#endif
#include <perf/mmap.h>
#include "evlist.h"
#include "callchain.h"
#include "evsel.h"
#include "event.h"
#include "print_binary.h"
#include "thread_map.h"
#include "trace-event.h"
#include "mmap.h"
#include "stat.h"
#include "metricgroup.h"
#include "util/env.h"
#include "util/pmu.h"
#include <internal/lib.h>
#include "util.h"

#if PY_MAJOR_VERSION < 3
#define _PyUnicode_FromString(arg) \
  PyString_FromString(arg)
#define _PyUnicode_AsString(arg) \
  PyString_AsString(arg)
#define _PyUnicode_FromFormat(...) \
  PyString_FromFormat(__VA_ARGS__)
#define _PyLong_FromLong(arg) \
  PyInt_FromLong(arg)

#else

#define _PyUnicode_FromString(arg) \
  PyUnicode_FromString(arg)
/* Needed by get_tracepoint_field(); PyUnicode_AsUTF8() is the Python 3 counterpart. */
#define _PyUnicode_AsString(arg) \
  PyUnicode_AsUTF8(arg)
#define _PyUnicode_FromFormat(...) \
  PyUnicode_FromFormat(__VA_ARGS__)
#define _PyLong_FromLong(arg) \
  PyLong_FromLong(arg)
#endif

#ifndef Py_TYPE
#define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
#endif

/*
 * Provide these two so that we don't have to link against callchain.c and
 * start dragging hist.c, etc.
 */
struct callchain_param callchain_param;

int parse_callchain_record(const char *arg __maybe_unused,
			   struct callchain_param *param __maybe_unused)
{
	return 0;
}

/*
 * Add these not to drag util/env.c
 */
struct perf_env perf_env;

const char *perf_env__cpuid(struct perf_env *env __maybe_unused)
{
	return NULL;
}

// This one is a bit easier, wouldn't drag too much, but leave it as a stub; we need it here.
const char *perf_env__arch(struct perf_env *env __maybe_unused)
{
	return NULL;
}

/*
 * Add this one here not to drag util/stat-shadow.c
 */
void perf_stat__collect_metric_expr(struct evlist *evsel_list)
{
}

/*
 * These are needed not to drag the PMU bandwagon, the jevents generated
 * pmu_sys_event_tables, etc. evsel__find_pmu() is so far used only for
 * per-PMU perf_event_attr.exclude_guest handling, which is not really needed
 * for the known perf python binding use cases, so revisit if this becomes
 * necessary.
 */
struct perf_pmu *evsel__find_pmu(struct evsel *evsel __maybe_unused)
{
	return NULL;
}

int perf_pmu__scan_file(struct perf_pmu *pmu, const char *name, const char *fmt, ...)
{
	return EOF;
}

/*
 * Add this one here not to drag util/metricgroup.c
 */
int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
				    struct rblist *new_metric_events,
				    struct rblist *old_metric_events)
{
	return 0;
}

/*
 * XXX: All these evsel destructors need some better mechanism, like a linked
 * list of destructors registered when the relevant code indeed is used instead
 * of having more and more calls in perf_evsel__delete(). -- acme
 *
 * For now, add some more:
 *
 * Not to drag the BPF bandwagon...
 */
void bpf_counter__destroy(struct evsel *evsel);
int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd);
int bpf_counter__disable(struct evsel *evsel);

void bpf_counter__destroy(struct evsel *evsel __maybe_unused)
{
}

int bpf_counter__install_pe(struct evsel *evsel __maybe_unused, int cpu __maybe_unused, int fd __maybe_unused)
{
	return 0;
}

int bpf_counter__disable(struct evsel *evsel __maybe_unused)
{
	return 0;
}

/*
 * Support debug printing even though util/debug.c is not linked. That means
 * implementing 'verbose' and 'eprintf'.
 */
int verbose;
int debug_peo_args;

int eprintf(int level, int var, const char *fmt, ...);

int eprintf(int level, int var, const char *fmt, ...)
{
	va_list args;
	int ret = 0;

	if (var >= level) {
		va_start(args, fmt);
		ret = vfprintf(stderr, fmt, args);
		va_end(args);
	}

	return ret;
}

/* Define PyVarObject_HEAD_INIT for python 2.5 */
#ifndef PyVarObject_HEAD_INIT
# define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size,
#endif

#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC initperf(void);
#else
PyMODINIT_FUNC PyInit_perf(void);
#endif

#define member_def(type, member, ptype, help) \
	{ #member, ptype, \
	  offsetof(struct pyrf_event, event) + offsetof(struct type, member), \
	  0, help }

#define sample_member_def(name, member, ptype, help) \
	{ #name, ptype, \
	  offsetof(struct pyrf_event, sample) + offsetof(struct perf_sample, member), \
	  0, help }

struct pyrf_event {
	PyObject_HEAD
	struct evsel *evsel;
	struct perf_sample sample;
	union perf_event event;
};

#define sample_members \
	sample_member_def(sample_ip, ip, T_ULONGLONG, "event ip"), \
	sample_member_def(sample_pid, pid, T_INT, "event pid"), \
	sample_member_def(sample_tid, tid, T_INT, "event tid"), \
	sample_member_def(sample_time, time, T_ULONGLONG, "event timestamp"), \
	sample_member_def(sample_addr, addr, T_ULONGLONG, "event addr"), \
	sample_member_def(sample_id, id, T_ULONGLONG, "event id"), \
	sample_member_def(sample_stream_id, stream_id, T_ULONGLONG, "event stream id"), \
	sample_member_def(sample_period, period, T_ULONGLONG, "event period"), \
	sample_member_def(sample_cpu, cpu, T_UINT, "event cpu"),
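
/*
 * The member_def() and sample_member_def() macros above compute the offset of
 * a field inside struct pyrf_event so that Python's generic PyMemberDef
 * machinery can expose it as an attribute. As an illustrative sketch (added
 * for clarity, not verbatim preprocessor output), a line such as
 *
 *	member_def(perf_record_mmap, pid, T_UINT, "event pid"),
 *
 * expands to roughly
 *
 *	{ "pid", T_UINT,
 *	  offsetof(struct pyrf_event, event) + offsetof(struct perf_record_mmap, pid),
 *	  0, "event pid" },
 *
 * i.e. attribute name, type code, byte offset from the start of the object,
 * flags and a doc string.
 */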

static char pyrf_mmap_event__doc[] = PyDoc_STR("perf mmap event object.");

static PyMemberDef pyrf_mmap_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_event_header, misc, T_UINT, "event misc"),
	member_def(perf_record_mmap, pid, T_UINT, "event pid"),
	member_def(perf_record_mmap, tid, T_UINT, "event tid"),
	member_def(perf_record_mmap, start, T_ULONGLONG, "start of the map"),
	member_def(perf_record_mmap, len, T_ULONGLONG, "map length"),
	member_def(perf_record_mmap, pgoff, T_ULONGLONG, "page offset"),
	member_def(perf_record_mmap, filename, T_STRING_INPLACE, "backing store"),
	{ .name = NULL, },
};

static PyObject *pyrf_mmap_event__repr(struct pyrf_event *pevent)
{
	PyObject *ret;
	char *s;

	if (asprintf(&s, "{ type: mmap, pid: %u, tid: %u, start: %#" PRI_lx64 ", "
			 "length: %#" PRI_lx64 ", offset: %#" PRI_lx64 ", "
			 "filename: %s }",
		     pevent->event.mmap.pid, pevent->event.mmap.tid,
		     pevent->event.mmap.start, pevent->event.mmap.len,
		     pevent->event.mmap.pgoff, pevent->event.mmap.filename) < 0) {
		ret = PyErr_NoMemory();
	} else {
		ret = _PyUnicode_FromString(s);
		free(s);
	}
	return ret;
}

static PyTypeObject pyrf_mmap_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.mmap_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_mmap_event__doc,
	.tp_members = pyrf_mmap_event__members,
	.tp_repr = (reprfunc)pyrf_mmap_event__repr,
};

static char pyrf_task_event__doc[] = PyDoc_STR("perf task (fork/exit) event object.");

static PyMemberDef pyrf_task_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_fork, pid, T_UINT, "event pid"),
	member_def(perf_record_fork, ppid, T_UINT, "event ppid"),
	member_def(perf_record_fork, tid, T_UINT, "event tid"),
	member_def(perf_record_fork, ptid, T_UINT, "event ptid"),
	member_def(perf_record_fork, time, T_ULONGLONG, "timestamp"),
	{ .name = NULL, },
};

static PyObject *pyrf_task_event__repr(struct pyrf_event *pevent)
{
	return _PyUnicode_FromFormat("{ type: %s, pid: %u, ppid: %u, tid: %u, "
				     "ptid: %u, time: %" PRI_lu64 "}",
				     pevent->event.header.type == PERF_RECORD_FORK ? "fork" : "exit",
				     pevent->event.fork.pid,
				     pevent->event.fork.ppid,
				     pevent->event.fork.tid,
				     pevent->event.fork.ptid,
				     pevent->event.fork.time);
}

static PyTypeObject pyrf_task_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.task_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_task_event__doc,
	.tp_members = pyrf_task_event__members,
	.tp_repr = (reprfunc)pyrf_task_event__repr,
};

static char pyrf_comm_event__doc[] = PyDoc_STR("perf comm event object.");

static PyMemberDef pyrf_comm_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_comm, pid, T_UINT, "event pid"),
	member_def(perf_record_comm, tid, T_UINT, "event tid"),
	member_def(perf_record_comm, comm, T_STRING_INPLACE, "process name"),
	{ .name = NULL, },
};

static PyObject *pyrf_comm_event__repr(struct pyrf_event *pevent)
{
	return _PyUnicode_FromFormat("{ type: comm, pid: %u, tid: %u, comm: %s }",
				     pevent->event.comm.pid,
				     pevent->event.comm.tid,
				     pevent->event.comm.comm);
}

static PyTypeObject pyrf_comm_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.comm_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_comm_event__doc,
	.tp_members = pyrf_comm_event__members,
	.tp_repr = (reprfunc)pyrf_comm_event__repr,
};

static char pyrf_throttle_event__doc[] = PyDoc_STR("perf throttle event object.");

static PyMemberDef pyrf_throttle_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_throttle, time, T_ULONGLONG, "timestamp"),
	member_def(perf_record_throttle, id, T_ULONGLONG, "event id"),
	member_def(perf_record_throttle, stream_id, T_ULONGLONG, "event stream id"),
	{ .name = NULL, },
};

static PyObject *pyrf_throttle_event__repr(struct pyrf_event *pevent)
{
	struct perf_record_throttle *te = (struct perf_record_throttle *)(&pevent->event.header + 1);

	return _PyUnicode_FromFormat("{ type: %sthrottle, time: %" PRI_lu64 ", id: %" PRI_lu64
				     ", stream_id: %" PRI_lu64 " }",
				     pevent->event.header.type == PERF_RECORD_THROTTLE ? "" : "un",
				     te->time, te->id, te->stream_id);
}

static PyTypeObject pyrf_throttle_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.throttle_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_throttle_event__doc,
	.tp_members = pyrf_throttle_event__members,
	.tp_repr = (reprfunc)pyrf_throttle_event__repr,
};

static char pyrf_lost_event__doc[] = PyDoc_STR("perf lost event object.");

static PyMemberDef pyrf_lost_event__members[] = {
	sample_members
	member_def(perf_record_lost, id, T_ULONGLONG, "event id"),
	member_def(perf_record_lost, lost, T_ULONGLONG, "number of lost events"),
	{ .name = NULL, },
};

static PyObject *pyrf_lost_event__repr(struct pyrf_event *pevent)
{
	PyObject *ret;
	char *s;

	if (asprintf(&s, "{ type: lost, id: %#" PRI_lx64 ", "
			 "lost: %#" PRI_lx64 " }",
		     pevent->event.lost.id, pevent->event.lost.lost) < 0) {
		ret = PyErr_NoMemory();
	} else {
		ret = _PyUnicode_FromString(s);
		free(s);
	}
	return ret;
}

static PyTypeObject pyrf_lost_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.lost_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_lost_event__doc,
	.tp_members = pyrf_lost_event__members,
	.tp_repr = (reprfunc)pyrf_lost_event__repr,
};

static char pyrf_read_event__doc[] = PyDoc_STR("perf read event object.");

static PyMemberDef pyrf_read_event__members[] = {
	sample_members
	member_def(perf_record_read, pid, T_UINT, "event pid"),
	member_def(perf_record_read, tid, T_UINT, "event tid"),
	{ .name = NULL, },
};

static PyObject *pyrf_read_event__repr(struct pyrf_event *pevent)
{
	return _PyUnicode_FromFormat("{ type: read, pid: %u, tid: %u }",
				     pevent->event.read.pid,
				     pevent->event.read.tid);
	/*
	 * FIXME: return the array of read values,
	 * making this method useful ;-)
	 */
}

static PyTypeObject pyrf_read_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.read_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_read_event__doc,
	.tp_members = pyrf_read_event__members,
	.tp_repr = (reprfunc)pyrf_read_event__repr,
};

static char pyrf_sample_event__doc[] = PyDoc_STR("perf sample event object.");

static PyMemberDef pyrf_sample_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	{ .name = NULL, },
};

static PyObject *pyrf_sample_event__repr(struct pyrf_event *pevent)
{
	PyObject *ret;
	char *s;

	if (asprintf(&s, "{ type: sample }") < 0) {
		ret = PyErr_NoMemory();
	} else {
		ret = _PyUnicode_FromString(s);
		free(s);
	}
	return ret;
}

#ifdef HAVE_LIBTRACEEVENT
static bool is_tracepoint(struct pyrf_event *pevent)
{
	return pevent->evsel->core.attr.type == PERF_TYPE_TRACEPOINT;
}

static PyObject*
tracepoint_field(struct pyrf_event *pe, struct tep_format_field *field)
{
	struct tep_handle *pevent = field->event->tep;
	void *data = pe->sample.raw_data;
	PyObject *ret = NULL;
	unsigned long long val;
	unsigned int offset, len;

	if (field->flags & TEP_FIELD_IS_ARRAY) {
		offset = field->offset;
		len    = field->size;
		if (field->flags & TEP_FIELD_IS_DYNAMIC) {
			val     = tep_read_number(pevent, data + offset, len);
			offset  = val;
			len     = offset >> 16;
			offset &= 0xffff;
			if (tep_field_is_relative(field->flags))
				offset += field->offset + field->size;
		}
		if (field->flags & TEP_FIELD_IS_STRING &&
		    is_printable_array(data + offset, len)) {
			ret = _PyUnicode_FromString((char *)data + offset);
		} else {
			ret = PyByteArray_FromStringAndSize((const char *) data + offset, len);
			field->flags &= ~TEP_FIELD_IS_STRING;
		}
	} else {
		val = tep_read_number(pevent, data + field->offset,
				      field->size);
		if (field->flags & TEP_FIELD_IS_POINTER)
			ret = PyLong_FromUnsignedLong((unsigned long) val);
		else if (field->flags & TEP_FIELD_IS_SIGNED)
			ret = PyLong_FromLong((long) val);
		else
			ret = PyLong_FromUnsignedLong((unsigned long) val);
	}

	return ret;
}

static PyObject*
get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name)
{
	const char *str = _PyUnicode_AsString(PyObject_Str(attr_name));
	struct evsel *evsel = pevent->evsel;
	struct tep_format_field *field;

	if (!evsel->tp_format) {
		struct tep_event *tp_format;

		tp_format = trace_event__tp_format_id(evsel->core.attr.config);
		if (IS_ERR_OR_NULL(tp_format))
			return NULL;

		evsel->tp_format = tp_format;
	}

	field = tep_find_any_field(evsel->tp_format, str);
	if (!field)
		return NULL;

	return tracepoint_field(pevent, field);
}
#endif /* HAVE_LIBTRACEEVENT */

static PyObject*
pyrf_sample_event__getattro(struct pyrf_event *pevent, PyObject *attr_name)
{
	PyObject *obj = NULL;

#ifdef HAVE_LIBTRACEEVENT
	if (is_tracepoint(pevent))
		obj = get_tracepoint_field(pevent, attr_name);
#endif

	return obj ?: PyObject_GenericGetAttr((PyObject *) pevent, attr_name);
}

static PyTypeObject pyrf_sample_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.sample_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_sample_event__doc,
	.tp_members = pyrf_sample_event__members,
	.tp_repr = (reprfunc)pyrf_sample_event__repr,
	.tp_getattro = (getattrofunc) pyrf_sample_event__getattro,
};

static char pyrf_context_switch_event__doc[] = PyDoc_STR("perf context_switch event object.");

static PyMemberDef pyrf_context_switch_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_switch, next_prev_pid, T_UINT, "next/prev pid"),
	member_def(perf_record_switch, next_prev_tid, T_UINT, "next/prev tid"),
	{ .name = NULL, },
};

static PyObject *pyrf_context_switch_event__repr(struct pyrf_event *pevent)
{
	PyObject *ret;
	char *s;

	if (asprintf(&s, "{ type: context_switch, next_prev_pid: %u, next_prev_tid: %u, switch_out: %u }",
		     pevent->event.context_switch.next_prev_pid,
		     pevent->event.context_switch.next_prev_tid,
		     !!(pevent->event.header.misc & PERF_RECORD_MISC_SWITCH_OUT)) < 0) {
		ret = PyErr_NoMemory();
	} else {
		ret = _PyUnicode_FromString(s);
		free(s);
	}
	return ret;
}

static PyTypeObject pyrf_context_switch_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.context_switch_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_context_switch_event__doc,
	.tp_members = pyrf_context_switch_event__members,
	.tp_repr = (reprfunc)pyrf_context_switch_event__repr,
};

static int pyrf_event__setup_types(void)
{
	int err;
	pyrf_mmap_event__type.tp_new =
	pyrf_task_event__type.tp_new =
	pyrf_comm_event__type.tp_new =
	pyrf_lost_event__type.tp_new =
	pyrf_read_event__type.tp_new =
	pyrf_sample_event__type.tp_new =
	pyrf_context_switch_event__type.tp_new =
	pyrf_throttle_event__type.tp_new = PyType_GenericNew;
	err = PyType_Ready(&pyrf_mmap_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_lost_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_task_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_comm_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_throttle_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_read_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_sample_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_context_switch_event__type);
	if (err < 0)
		goto out;
out:
	return err;
}

static PyTypeObject *pyrf_event__type[] = {
	[PERF_RECORD_MMAP]	 = &pyrf_mmap_event__type,
	[PERF_RECORD_LOST]	 = &pyrf_lost_event__type,
	[PERF_RECORD_COMM]	 = &pyrf_comm_event__type,
	[PERF_RECORD_EXIT]	 = &pyrf_task_event__type,
	[PERF_RECORD_THROTTLE]	 = &pyrf_throttle_event__type,
	[PERF_RECORD_UNTHROTTLE] = &pyrf_throttle_event__type,
	[PERF_RECORD_FORK]	 = &pyrf_task_event__type,
	[PERF_RECORD_READ]	 = &pyrf_read_event__type,
	[PERF_RECORD_SAMPLE]	 = &pyrf_sample_event__type,
	[PERF_RECORD_SWITCH]	 = &pyrf_context_switch_event__type,
	[PERF_RECORD_SWITCH_CPU_WIDE] = &pyrf_context_switch_event__type,
};

static PyObject *pyrf_event__new(union perf_event *event)
{
	struct pyrf_event *pevent;
	PyTypeObject *ptype;

	if ((event->header.type < PERF_RECORD_MMAP ||
	     event->header.type > PERF_RECORD_SAMPLE) &&
	    !(event->header.type == PERF_RECORD_SWITCH ||
	      event->header.type == PERF_RECORD_SWITCH_CPU_WIDE))
		return NULL;

	ptype = pyrf_event__type[event->header.type];
	pevent = PyObject_New(struct pyrf_event, ptype);
	if (pevent != NULL)
		memcpy(&pevent->event, event, event->header.size);
	return (PyObject *)pevent;
}

struct pyrf_cpu_map {
	PyObject_HEAD

	struct perf_cpu_map *cpus;
};

static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus,
			      PyObject *args, PyObject *kwargs)
{
	static char *kwlist[] = { "cpustr", NULL };
	char *cpustr = NULL;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|s",
					 kwlist, &cpustr))
		return -1;

	pcpus->cpus = perf_cpu_map__new(cpustr);
	if (pcpus->cpus == NULL)
		return -1;
	return 0;
}

static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus)
{
	perf_cpu_map__put(pcpus->cpus);
	Py_TYPE(pcpus)->tp_free((PyObject*)pcpus);
}

static Py_ssize_t pyrf_cpu_map__length(PyObject *obj)
{
	struct pyrf_cpu_map *pcpus = (void *)obj;

	return perf_cpu_map__nr(pcpus->cpus);
}
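
/*
 * The __length callback above and the __item callback below are wired into
 * PySequenceMethods further down, which is what makes perf.cpu_map behave as
 * a regular Python sequence (perf.thread_map follows the same pattern).
 * Illustrative Python-side sketch, assuming the module was imported as
 * 'perf' (this is documentation only, not compiled as part of this file):
 *
 *	cpus = perf.cpu_map("0-3")
 *	print(len(cpus))	# goes through pyrf_cpu_map__length()
 *	print(cpus[0])		# goes through pyrf_cpu_map__item()
 *	for cpu in cpus:	# iteration also uses sq_item
 *		pass
 */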

static PyObject *pyrf_cpu_map__item(PyObject *obj, Py_ssize_t i)
{
	struct pyrf_cpu_map *pcpus = (void *)obj;

	if (i >= perf_cpu_map__nr(pcpus->cpus))
		return NULL;

	return Py_BuildValue("i", perf_cpu_map__cpu(pcpus->cpus, i).cpu);
}

static PySequenceMethods pyrf_cpu_map__sequence_methods = {
	.sq_length = pyrf_cpu_map__length,
	.sq_item   = pyrf_cpu_map__item,
};

static char pyrf_cpu_map__doc[] = PyDoc_STR("cpu map object.");

static PyTypeObject pyrf_cpu_map__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.cpu_map",
	.tp_basicsize = sizeof(struct pyrf_cpu_map),
	.tp_dealloc = (destructor)pyrf_cpu_map__delete,
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_cpu_map__doc,
	.tp_as_sequence = &pyrf_cpu_map__sequence_methods,
	.tp_init = (initproc)pyrf_cpu_map__init,
};

static int pyrf_cpu_map__setup_types(void)
{
	pyrf_cpu_map__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_cpu_map__type);
}

struct pyrf_thread_map {
	PyObject_HEAD

	struct perf_thread_map *threads;
};

static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads,
				 PyObject *args, PyObject *kwargs)
{
	static char *kwlist[] = { "pid", "tid", "uid", NULL };
	int pid = -1, tid = -1, uid = UINT_MAX;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iii",
					 kwlist, &pid, &tid, &uid))
		return -1;

	pthreads->threads = thread_map__new(pid, tid, uid);
	if (pthreads->threads == NULL)
		return -1;
	return 0;
}

static void pyrf_thread_map__delete(struct pyrf_thread_map *pthreads)
{
	perf_thread_map__put(pthreads->threads);
	Py_TYPE(pthreads)->tp_free((PyObject*)pthreads);
}

static Py_ssize_t pyrf_thread_map__length(PyObject *obj)
{
	struct pyrf_thread_map *pthreads = (void *)obj;

	return perf_thread_map__nr(pthreads->threads);
}

static PyObject *pyrf_thread_map__item(PyObject *obj, Py_ssize_t i)
{
	struct pyrf_thread_map *pthreads = (void *)obj;

	if (i >= perf_thread_map__nr(pthreads->threads))
		return NULL;

	return Py_BuildValue("i", perf_thread_map__pid(pthreads->threads, i));
}

static PySequenceMethods pyrf_thread_map__sequence_methods = {
	.sq_length = pyrf_thread_map__length,
	.sq_item   = pyrf_thread_map__item,
};

static char pyrf_thread_map__doc[] = PyDoc_STR("thread map object.");

static PyTypeObject pyrf_thread_map__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.thread_map",
	.tp_basicsize = sizeof(struct pyrf_thread_map),
	.tp_dealloc = (destructor)pyrf_thread_map__delete,
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_thread_map__doc,
	.tp_as_sequence = &pyrf_thread_map__sequence_methods,
	.tp_init = (initproc)pyrf_thread_map__init,
};

static int pyrf_thread_map__setup_types(void)
{
	pyrf_thread_map__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_thread_map__type);
}

struct pyrf_evsel {
	PyObject_HEAD

	struct evsel evsel;
};

static int pyrf_evsel__init(struct pyrf_evsel *pevsel,
			    PyObject *args, PyObject *kwargs)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
		.sample_type = PERF_SAMPLE_PERIOD | PERF_SAMPLE_TID,
	};
	static char *kwlist[] = {
		"type",
		"config",
		"sample_freq",
		"sample_period",
		"sample_type",
		"read_format",
		"disabled",
		"inherit",
		"pinned",
		"exclusive",
		"exclude_user",
		"exclude_kernel",
		"exclude_hv",
		"exclude_idle",
		"mmap",
		"context_switch",
		"comm",
		"freq",
		"inherit_stat",
		"enable_on_exec",
		"task",
		"watermark",
		"precise_ip",
		"mmap_data",
		"sample_id_all",
		"wakeup_events",
		"bp_type",
		"bp_addr",
		"bp_len",
		NULL
	};
	u64 sample_period = 0;
	u32 disabled = 0,
	    inherit = 0,
	    pinned = 0,
	    exclusive = 0,
	    exclude_user = 0,
	    exclude_kernel = 0,
	    exclude_hv = 0,
	    exclude_idle = 0,
	    mmap = 0,
	    context_switch = 0,
	    comm = 0,
	    freq = 1,
	    inherit_stat = 0,
	    enable_on_exec = 0,
	    task = 0,
	    watermark = 0,
	    precise_ip = 0,
	    mmap_data = 0,
	    sample_id_all = 1;
	int idx = 0;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs,
					 "|iKiKKiiiiiiiiiiiiiiiiiiiiiiKK", kwlist,
					 &attr.type, &attr.config, &attr.sample_freq,
					 &sample_period, &attr.sample_type,
					 &attr.read_format, &disabled, &inherit,
					 &pinned, &exclusive, &exclude_user,
					 &exclude_kernel, &exclude_hv, &exclude_idle,
					 &mmap, &context_switch, &comm, &freq, &inherit_stat,
					 &enable_on_exec, &task, &watermark,
					 &precise_ip, &mmap_data, &sample_id_all,
					 &attr.wakeup_events, &attr.bp_type,
					 &attr.bp_addr, &attr.bp_len, &idx))
		return -1;

	/* union... */
	if (sample_period != 0) {
		if (attr.sample_freq != 0)
			return -1; /* FIXME: throw right exception */
		attr.sample_period = sample_period;
	}

	/* Bitfields */
	attr.disabled = disabled;
	attr.inherit = inherit;
	attr.pinned = pinned;
	attr.exclusive = exclusive;
	attr.exclude_user = exclude_user;
	attr.exclude_kernel = exclude_kernel;
	attr.exclude_hv = exclude_hv;
	attr.exclude_idle = exclude_idle;
	attr.mmap = mmap;
	attr.context_switch = context_switch;
	attr.comm = comm;
	attr.freq = freq;
	attr.inherit_stat = inherit_stat;
	attr.enable_on_exec = enable_on_exec;
	attr.task = task;
	attr.watermark = watermark;
	attr.precise_ip = precise_ip;
	attr.mmap_data = mmap_data;
	attr.sample_id_all = sample_id_all;
	attr.size = sizeof(attr);

	evsel__init(&pevsel->evsel, &attr, idx);
	return 0;
}

static void pyrf_evsel__delete(struct pyrf_evsel *pevsel)
{
	evsel__exit(&pevsel->evsel);
	Py_TYPE(pevsel)->tp_free((PyObject*)pevsel);
}

static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
				  PyObject *args, PyObject *kwargs)
{
	struct evsel *evsel = &pevsel->evsel;
	struct perf_cpu_map *cpus = NULL;
	struct perf_thread_map *threads = NULL;
	PyObject *pcpus = NULL, *pthreads = NULL;
	int group = 0, inherit = 0;
	static char *kwlist[] = { "cpus", "threads", "group", "inherit", NULL };

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist,
					 &pcpus, &pthreads, &group, &inherit))
		return NULL;

	if (pthreads != NULL)
		threads = ((struct pyrf_thread_map *)pthreads)->threads;

	if (pcpus != NULL)
		cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;

	evsel->core.attr.inherit = inherit;
	/*
	 * This will group just the fds for this single evsel, to group
	 * multiple events, use evlist.open().
	 */
	if (evsel__open(evsel, cpus, threads) < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	Py_INCREF(Py_None);
	return Py_None;
}

static PyMethodDef pyrf_evsel__methods[] = {
	{
		.ml_name  = "open",
		.ml_meth  = (PyCFunction)pyrf_evsel__open,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("open the event selector file descriptor table.")
	},
	{ .ml_name = NULL, }
};

static char pyrf_evsel__doc[] = PyDoc_STR("perf event selector object.");

static PyTypeObject pyrf_evsel__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.evsel",
	.tp_basicsize = sizeof(struct pyrf_evsel),
	.tp_dealloc = (destructor)pyrf_evsel__delete,
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_evsel__doc,
	.tp_methods = pyrf_evsel__methods,
	.tp_init = (initproc)pyrf_evsel__init,
};

static int pyrf_evsel__setup_types(void)
{
	pyrf_evsel__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_evsel__type);
}

struct pyrf_evlist {
	PyObject_HEAD

	struct evlist evlist;
};

static int pyrf_evlist__init(struct pyrf_evlist *pevlist,
			     PyObject *args, PyObject *kwargs __maybe_unused)
{
	PyObject *pcpus = NULL, *pthreads = NULL;
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;

	if (!PyArg_ParseTuple(args, "OO", &pcpus, &pthreads))
		return -1;

	threads = ((struct pyrf_thread_map *)pthreads)->threads;
	cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
	evlist__init(&pevlist->evlist, cpus, threads);
	return 0;
}

static void pyrf_evlist__delete(struct pyrf_evlist *pevlist)
{
	evlist__exit(&pevlist->evlist);
	Py_TYPE(pevlist)->tp_free((PyObject*)pevlist);
}

static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist,
				   PyObject *args, PyObject *kwargs)
{
	struct evlist *evlist = &pevlist->evlist;
	static char *kwlist[] = { "pages", "overwrite", NULL };
	int pages = 128, overwrite = false;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", kwlist,
					 &pages, &overwrite))
		return NULL;

	if (evlist__mmap(evlist, pages) < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	Py_INCREF(Py_None);
	return Py_None;
}

static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist,
				   PyObject *args, PyObject *kwargs)
{
	struct evlist *evlist = &pevlist->evlist;
	static char *kwlist[] = { "timeout", NULL };
	int timeout = -1, n;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout))
		return NULL;

	n = evlist__poll(evlist, timeout);
	if (n < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	return Py_BuildValue("i", n);
}
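
/*
 * Illustrative note on the poll() wrapper above (added for clarity):
 * evlist__poll() follows poll(2) semantics on the evlist's file descriptors,
 * so the value handed back to Python is the number of ready descriptors, and
 * a negative timeout blocks until something is ready, e.g.:
 *
 *	n = evlist.poll(timeout = -1)	# block until at least one fd is ready
 *	if n > 0:
 *		...			# some ring buffer has data to read
 */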

static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
					 PyObject *args __maybe_unused,
					 PyObject *kwargs __maybe_unused)
{
	struct evlist *evlist = &pevlist->evlist;
	PyObject *list = PyList_New(0);
	int i;

	for (i = 0; i < evlist->core.pollfd.nr; ++i) {
		PyObject *file;
#if PY_MAJOR_VERSION < 3
		FILE *fp = fdopen(evlist->core.pollfd.entries[i].fd, "r");

		if (fp == NULL)
			goto free_list;

		file = PyFile_FromFile(fp, "perf", "r", NULL);
#else
		file = PyFile_FromFd(evlist->core.pollfd.entries[i].fd, "perf", "r", -1,
				     NULL, NULL, NULL, 0);
#endif
		if (file == NULL)
			goto free_list;

		if (PyList_Append(list, file) != 0) {
			Py_DECREF(file);
			goto free_list;
		}

		Py_DECREF(file);
	}

	return list;
free_list:
	return PyErr_NoMemory();
}


static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist,
				  PyObject *args,
				  PyObject *kwargs __maybe_unused)
{
	struct evlist *evlist = &pevlist->evlist;
	PyObject *pevsel;
	struct evsel *evsel;

	if (!PyArg_ParseTuple(args, "O", &pevsel))
		return NULL;

	Py_INCREF(pevsel);
	evsel = &((struct pyrf_evsel *)pevsel)->evsel;
	evsel->core.idx = evlist->core.nr_entries;
	evlist__add(evlist, evsel);

	return Py_BuildValue("i", evlist->core.nr_entries);
}

static struct mmap *get_md(struct evlist *evlist, int cpu)
{
	int i;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		struct mmap *md = &evlist->mmap[i];

		if (md->core.cpu.cpu == cpu)
			return md;
	}

	return NULL;
}

static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
					  PyObject *args, PyObject *kwargs)
{
	struct evlist *evlist = &pevlist->evlist;
	union perf_event *event;
	int sample_id_all = 1, cpu;
	static char *kwlist[] = { "cpu", "sample_id_all", NULL };
	struct mmap *md;
	int err;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist,
					 &cpu, &sample_id_all))
		return NULL;

	md = get_md(evlist, cpu);
	if (!md)
		return NULL;

	if (perf_mmap__read_init(&md->core) < 0)
		goto end;

	event = perf_mmap__read_event(&md->core);
	if (event != NULL) {
		PyObject *pyevent = pyrf_event__new(event);
		struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
		struct evsel *evsel;

		if (pyevent == NULL)
			return PyErr_NoMemory();

		evsel = evlist__event2evsel(evlist, event);
		if (!evsel) {
			Py_INCREF(Py_None);
			return Py_None;
		}

		pevent->evsel = evsel;

		err = evsel__parse_sample(evsel, event, &pevent->sample);

		/* Consume the event only after we parsed it out. */
		perf_mmap__consume(&md->core);

		if (err)
			return PyErr_Format(PyExc_OSError,
					    "perf: can't parse sample, err=%d", err);
		return pyevent;
	}
end:
	Py_INCREF(Py_None);
	return Py_None;
}

static PyObject *pyrf_evlist__open(struct pyrf_evlist *pevlist,
				   PyObject *args, PyObject *kwargs)
{
	struct evlist *evlist = &pevlist->evlist;

	if (evlist__open(evlist) < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	Py_INCREF(Py_None);
	return Py_None;
}

static PyMethodDef pyrf_evlist__methods[] = {
	{
		.ml_name  = "mmap",
		.ml_meth  = (PyCFunction)pyrf_evlist__mmap,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("mmap the file descriptor table.")
	},
	{
		.ml_name  = "open",
		.ml_meth  = (PyCFunction)pyrf_evlist__open,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("open the file descriptors.")
	},
	{
		.ml_name  = "poll",
		.ml_meth  = (PyCFunction)pyrf_evlist__poll,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("poll the file descriptor table.")
	},
	{
		.ml_name  = "get_pollfd",
		.ml_meth  = (PyCFunction)pyrf_evlist__get_pollfd,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("get the poll file descriptor table.")
	},
	{
		.ml_name  = "add",
		.ml_meth  = (PyCFunction)pyrf_evlist__add,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("adds an event selector to the list.")
	},
	{
		.ml_name  = "read_on_cpu",
		.ml_meth  = (PyCFunction)pyrf_evlist__read_on_cpu,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("reads an event.")
	},
	{ .ml_name = NULL, }
};

static Py_ssize_t pyrf_evlist__length(PyObject *obj)
{
	struct pyrf_evlist *pevlist = (void *)obj;

	return pevlist->evlist.core.nr_entries;
}

static PyObject *pyrf_evlist__item(PyObject *obj, Py_ssize_t i)
{
	struct pyrf_evlist *pevlist = (void *)obj;
	struct evsel *pos;

	if (i >= pevlist->evlist.core.nr_entries)
		return NULL;

	evlist__for_each_entry(&pevlist->evlist, pos) {
		if (i-- == 0)
			break;
	}

	return Py_BuildValue("O", container_of(pos, struct pyrf_evsel, evsel));
}

static PySequenceMethods pyrf_evlist__sequence_methods = {
	.sq_length = pyrf_evlist__length,
	.sq_item   = pyrf_evlist__item,
};

static char pyrf_evlist__doc[] = PyDoc_STR("perf event selector list object.");

static PyTypeObject pyrf_evlist__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.evlist",
	.tp_basicsize = sizeof(struct pyrf_evlist),
	.tp_dealloc = (destructor)pyrf_evlist__delete,
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_as_sequence = &pyrf_evlist__sequence_methods,
	.tp_doc = pyrf_evlist__doc,
	.tp_methods = pyrf_evlist__methods,
	.tp_init = (initproc)pyrf_evlist__init,
};

static int pyrf_evlist__setup_types(void)
{
	pyrf_evlist__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_evlist__type);
}

#define PERF_CONST(name) { #name, PERF_##name }

static struct {
	const char *name;
	int	    value;
} perf__constants[] = {
	PERF_CONST(TYPE_HARDWARE),
	PERF_CONST(TYPE_SOFTWARE),
	PERF_CONST(TYPE_TRACEPOINT),
	PERF_CONST(TYPE_HW_CACHE),
	PERF_CONST(TYPE_RAW),
	PERF_CONST(TYPE_BREAKPOINT),

	PERF_CONST(COUNT_HW_CPU_CYCLES),
	PERF_CONST(COUNT_HW_INSTRUCTIONS),
	PERF_CONST(COUNT_HW_CACHE_REFERENCES),
	PERF_CONST(COUNT_HW_CACHE_MISSES),
	PERF_CONST(COUNT_HW_BRANCH_INSTRUCTIONS),
	PERF_CONST(COUNT_HW_BRANCH_MISSES),
	PERF_CONST(COUNT_HW_BUS_CYCLES),
	PERF_CONST(COUNT_HW_CACHE_L1D),
	PERF_CONST(COUNT_HW_CACHE_L1I),
	PERF_CONST(COUNT_HW_CACHE_LL),
	PERF_CONST(COUNT_HW_CACHE_DTLB),
	PERF_CONST(COUNT_HW_CACHE_ITLB),
	PERF_CONST(COUNT_HW_CACHE_BPU),
	PERF_CONST(COUNT_HW_CACHE_OP_READ),
	PERF_CONST(COUNT_HW_CACHE_OP_WRITE),
	PERF_CONST(COUNT_HW_CACHE_OP_PREFETCH),
	PERF_CONST(COUNT_HW_CACHE_RESULT_ACCESS),
	PERF_CONST(COUNT_HW_CACHE_RESULT_MISS),

	PERF_CONST(COUNT_HW_STALLED_CYCLES_FRONTEND),
	PERF_CONST(COUNT_HW_STALLED_CYCLES_BACKEND),

	PERF_CONST(COUNT_SW_CPU_CLOCK),
	PERF_CONST(COUNT_SW_TASK_CLOCK),
	PERF_CONST(COUNT_SW_PAGE_FAULTS),
	PERF_CONST(COUNT_SW_CONTEXT_SWITCHES),
	PERF_CONST(COUNT_SW_CPU_MIGRATIONS),
	PERF_CONST(COUNT_SW_PAGE_FAULTS_MIN),
	PERF_CONST(COUNT_SW_PAGE_FAULTS_MAJ),
	PERF_CONST(COUNT_SW_ALIGNMENT_FAULTS),
	PERF_CONST(COUNT_SW_EMULATION_FAULTS),
	PERF_CONST(COUNT_SW_DUMMY),

	PERF_CONST(SAMPLE_IP),
	PERF_CONST(SAMPLE_TID),
	PERF_CONST(SAMPLE_TIME),
	PERF_CONST(SAMPLE_ADDR),
	PERF_CONST(SAMPLE_READ),
	PERF_CONST(SAMPLE_CALLCHAIN),
	PERF_CONST(SAMPLE_ID),
	PERF_CONST(SAMPLE_CPU),
	PERF_CONST(SAMPLE_PERIOD),
	PERF_CONST(SAMPLE_STREAM_ID),
	PERF_CONST(SAMPLE_RAW),

	PERF_CONST(FORMAT_TOTAL_TIME_ENABLED),
	PERF_CONST(FORMAT_TOTAL_TIME_RUNNING),
	PERF_CONST(FORMAT_ID),
	PERF_CONST(FORMAT_GROUP),

	PERF_CONST(RECORD_MMAP),
	PERF_CONST(RECORD_LOST),
	PERF_CONST(RECORD_COMM),
	PERF_CONST(RECORD_EXIT),
	PERF_CONST(RECORD_THROTTLE),
	PERF_CONST(RECORD_UNTHROTTLE),
	PERF_CONST(RECORD_FORK),
	PERF_CONST(RECORD_READ),
	PERF_CONST(RECORD_SAMPLE),
	PERF_CONST(RECORD_MMAP2),
	PERF_CONST(RECORD_AUX),
	PERF_CONST(RECORD_ITRACE_START),
	PERF_CONST(RECORD_LOST_SAMPLES),
	PERF_CONST(RECORD_SWITCH),
	PERF_CONST(RECORD_SWITCH_CPU_WIDE),

	PERF_CONST(RECORD_MISC_SWITCH_OUT),
	{ .name = NULL, },
};

static PyObject *pyrf__tracepoint(struct pyrf_evsel *pevsel,
				  PyObject *args, PyObject *kwargs)
{
#ifndef HAVE_LIBTRACEEVENT
	return NULL;
#else
	struct tep_event *tp_format;
	static char *kwlist[] = { "sys", "name", NULL };
	char *sys = NULL;
	char *name = NULL;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ss", kwlist,
					 &sys, &name))
		return NULL;

	tp_format = trace_event__tp_format(sys, name);
	if (IS_ERR(tp_format))
		return _PyLong_FromLong(-1);

	return _PyLong_FromLong(tp_format->id);
#endif // HAVE_LIBTRACEEVENT
}

static PyMethodDef perf__methods[] = {
	{
		.ml_name  = "tracepoint",
		.ml_meth  = (PyCFunction) pyrf__tracepoint,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("Get tracepoint config.")
	},
	{ .ml_name = NULL, }
};

#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC initperf(void)
#else
PyMODINIT_FUNC PyInit_perf(void)
#endif
{
	PyObject *obj;
	int i;
	PyObject *dict;
#if PY_MAJOR_VERSION < 3
	PyObject *module = Py_InitModule("perf", perf__methods);
#else
	static struct PyModuleDef moduledef = {
		PyModuleDef_HEAD_INIT,
		"perf",			/* m_name */
		"",			/* m_doc */
		-1,			/* m_size */
		perf__methods,		/* m_methods */
		NULL,			/* m_reload */
		NULL,			/* m_traverse */
		NULL,			/* m_clear */
		NULL,			/* m_free */
	};
	PyObject *module = PyModule_Create(&moduledef);
#endif

	if (module == NULL ||
	    pyrf_event__setup_types() < 0 ||
	    pyrf_evlist__setup_types() < 0 ||
	    pyrf_evsel__setup_types() < 0 ||
	    pyrf_thread_map__setup_types() < 0 ||
	    pyrf_cpu_map__setup_types() < 0)
#if PY_MAJOR_VERSION < 3
		return;
#else
		return module;
#endif

	/* The page_size is placed in util object. */
	page_size = sysconf(_SC_PAGE_SIZE);

	Py_INCREF(&pyrf_evlist__type);
	PyModule_AddObject(module, "evlist", (PyObject*)&pyrf_evlist__type);

	Py_INCREF(&pyrf_evsel__type);
	PyModule_AddObject(module, "evsel", (PyObject*)&pyrf_evsel__type);

	Py_INCREF(&pyrf_mmap_event__type);
	PyModule_AddObject(module, "mmap_event", (PyObject *)&pyrf_mmap_event__type);

	Py_INCREF(&pyrf_lost_event__type);
	PyModule_AddObject(module, "lost_event", (PyObject *)&pyrf_lost_event__type);

	Py_INCREF(&pyrf_comm_event__type);
	PyModule_AddObject(module, "comm_event", (PyObject *)&pyrf_comm_event__type);

	Py_INCREF(&pyrf_task_event__type);
	PyModule_AddObject(module, "task_event", (PyObject *)&pyrf_task_event__type);

	Py_INCREF(&pyrf_throttle_event__type);
	PyModule_AddObject(module, "throttle_event", (PyObject *)&pyrf_throttle_event__type);

	Py_INCREF(&pyrf_read_event__type);
	PyModule_AddObject(module, "read_event", (PyObject *)&pyrf_read_event__type);

	Py_INCREF(&pyrf_sample_event__type);
	PyModule_AddObject(module, "sample_event", (PyObject *)&pyrf_sample_event__type);

	Py_INCREF(&pyrf_context_switch_event__type);
	PyModule_AddObject(module, "switch_event", (PyObject *)&pyrf_context_switch_event__type);

	Py_INCREF(&pyrf_thread_map__type);
	PyModule_AddObject(module, "thread_map", (PyObject*)&pyrf_thread_map__type);

	Py_INCREF(&pyrf_cpu_map__type);
	PyModule_AddObject(module, "cpu_map", (PyObject*)&pyrf_cpu_map__type);

	dict = PyModule_GetDict(module);
	if (dict == NULL)
		goto error;

	for (i = 0; perf__constants[i].name != NULL; i++) {
		obj = _PyLong_FromLong(perf__constants[i].value);
		if (obj == NULL)
			goto error;
		PyDict_SetItemString(dict, perf__constants[i].name, obj);
		Py_DECREF(obj);
	}

error:
	if (PyErr_Occurred())
		PyErr_SetString(PyExc_ImportError, "perf: Init failed!");
#if PY_MAJOR_VERSION >= 3
	return module;
#endif
}

/*
 * Dummy, to avoid dragging all the test_attr infrastructure in the python
 * binding.
 */
void test_attr__open(struct perf_event_attr *attr, pid_t pid, struct perf_cpu cpu,
		     int fd, int group_fd, unsigned long flags)
{
}
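
/*
 * Illustrative end-to-end sketch of the binding built from this file,
 * loosely based on the example scripts shipped under tools/perf/python/ in
 * the perf sources. This is documentation only; it is neither compiled nor
 * verified here, and the exact attribute combination is just one plausible
 * choice:
 *
 *	import perf
 *
 *	cpus = perf.cpu_map()
 *	threads = perf.thread_map(-1)
 *	evsel = perf.evsel(task = 1, comm = 1,
 *			   wakeup_events = 1, watermark = 1,
 *			   sample_id_all = 1,
 *			   sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
 *	evsel.open(cpus = cpus, threads = threads)
 *
 *	evlist = perf.evlist(cpus, threads)
 *	evlist.add(evsel)
 *	evlist.mmap()
 *	while True:
 *		evlist.poll(timeout = -1)
 *		for cpu in cpus:
 *			event = evlist.read_on_cpu(cpu)
 *			if not event:
 *				continue
 *			print(event)	# uses the tp_repr callbacks above
 */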