// SPDX-License-Identifier: GPL-2.0
#include <Python.h>
#include <structmember.h>
#include <inttypes.h>
#include <poll.h>
#include <linux/err.h>
#include <perf/cpumap.h>
#ifdef HAVE_LIBTRACEEVENT
#include <traceevent/event-parse.h>
#endif
#include <perf/mmap.h>
#include "evlist.h"
#include "callchain.h"
#include "evsel.h"
#include "event.h"
#include "print_binary.h"
#include "thread_map.h"
#include "trace-event.h"
#include "mmap.h"
#include "stat.h"
#include "metricgroup.h"
#include "util/env.h"
#include "util/pmu.h"
#include <internal/lib.h>
#include "util.h"

#if PY_MAJOR_VERSION < 3
#define _PyUnicode_FromString(arg) \
  PyString_FromString(arg)
#define _PyUnicode_AsString(arg) \
  PyString_AsString(arg)
#define _PyUnicode_FromFormat(...) \
  PyString_FromFormat(__VA_ARGS__)
#define _PyLong_FromLong(arg) \
  PyInt_FromLong(arg)

#else

#define _PyUnicode_FromString(arg) \
  PyUnicode_FromString(arg)
#define _PyUnicode_FromFormat(...) \
  PyUnicode_FromFormat(__VA_ARGS__)
#define _PyLong_FromLong(arg) \
  PyLong_FromLong(arg)
#endif

#ifndef Py_TYPE
#define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
#endif

/*
 * Provide these two so that we don't have to link against callchain.c and
 * start dragging hist.c, etc.
 */
struct callchain_param callchain_param;

int parse_callchain_record(const char *arg __maybe_unused,
			   struct callchain_param *param __maybe_unused)
{
	return 0;
}

/*
 * Add these so as not to drag in util/env.c
 */
struct perf_env perf_env;

const char *perf_env__cpuid(struct perf_env *env __maybe_unused)
{
	return NULL;
}

// This one is a bit easier and wouldn't drag in too much, but leave it as a stub since we need it here
const char *perf_env__arch(struct perf_env *env __maybe_unused)
{
	return NULL;
}

/*
 * These ones are needed so as not to drag in the PMU bandwagon, the
 * jevents-generated pmu_sys_event_tables, etc. evsel__find_pmu() is so far
 * used just for per-PMU perf_event_attr.exclude_guest handling, which is not
 * really needed for the known perf python binding use cases; revisit if this
 * becomes necessary.
 */
struct perf_pmu *evsel__find_pmu(const struct evsel *evsel __maybe_unused)
{
	return NULL;
}

int perf_pmu__scan_file(struct perf_pmu *pmu, const char *name, const char *fmt, ...)
{
	return EOF;
}

bool evsel__is_aux_event(const struct evsel *evsel __maybe_unused)
{
	return false;
}

/*
 * Add this one here so as not to drag in util/metricgroup.c
 */
int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
				    struct rblist *new_metric_events,
				    struct rblist *old_metric_events)
{
	return 0;
}

/*
 * XXX: All these evsel destructors need some better mechanism, like a linked
 * list of destructors registered when the relevant code is actually used,
 * instead of having more and more calls in perf_evsel__delete(). -- acme
 *
 * For now, add some more:
 *
 * Not to drag the BPF bandwagon...
 */
void bpf_counter__destroy(struct evsel *evsel);
int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd);
int bpf_counter__disable(struct evsel *evsel);

void bpf_counter__destroy(struct evsel *evsel __maybe_unused)
{
}

int bpf_counter__install_pe(struct evsel *evsel __maybe_unused, int cpu __maybe_unused, int fd __maybe_unused)
{
	return 0;
}

int bpf_counter__disable(struct evsel *evsel __maybe_unused)
{
	return 0;
}

/*
 * Support debug printing even though util/debug.c is not linked. That means
 * implementing 'verbose' and 'eprintf'.
 */
int verbose;
int debug_peo_args;

int eprintf(int level, int var, const char *fmt, ...);

int eprintf(int level, int var, const char *fmt, ...)
{
	va_list args;
	int ret = 0;

	if (var >= level) {
		va_start(args, fmt);
		ret = vfprintf(stderr, fmt, args);
		va_end(args);
	}

	return ret;
}

/* Define PyVarObject_HEAD_INIT for python 2.5 */
#ifndef PyVarObject_HEAD_INIT
# define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size,
#endif

#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC initperf(void);
#else
PyMODINIT_FUNC PyInit_perf(void);
#endif

#define member_def(type, member, ptype, help) \
	{ #member, ptype, \
	  offsetof(struct pyrf_event, event) + offsetof(struct type, member), \
	  0, help }

#define sample_member_def(name, member, ptype, help) \
	{ #name, ptype, \
	  offsetof(struct pyrf_event, sample) + offsetof(struct perf_sample, member), \
	  0, help }

struct pyrf_event {
	PyObject_HEAD
	struct evsel *evsel;
	struct perf_sample sample;
	union perf_event event;
};

#define sample_members \
	sample_member_def(sample_ip, ip, T_ULONGLONG, "event ip"), \
	sample_member_def(sample_pid, pid, T_INT, "event pid"), \
	sample_member_def(sample_tid, tid, T_INT, "event tid"), \
	sample_member_def(sample_time, time, T_ULONGLONG, "event timestamp"), \
	sample_member_def(sample_addr, addr, T_ULONGLONG, "event addr"), \
	sample_member_def(sample_id, id, T_ULONGLONG, "event id"), \
	sample_member_def(sample_stream_id, stream_id, T_ULONGLONG, "event stream id"), \
	sample_member_def(sample_period, period, T_ULONGLONG, "event period"), \
	sample_member_def(sample_cpu, cpu, T_UINT, "event cpu"),

static char pyrf_mmap_event__doc[] = PyDoc_STR("perf mmap event object.");

static PyMemberDef pyrf_mmap_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_event_header, misc, T_UINT, "event misc"),
	member_def(perf_record_mmap, pid, T_UINT, "event pid"),
	member_def(perf_record_mmap, tid, T_UINT, "event tid"),
	member_def(perf_record_mmap, start, T_ULONGLONG, "start of the map"),
	member_def(perf_record_mmap, len, T_ULONGLONG, "map length"),
	member_def(perf_record_mmap, pgoff, T_ULONGLONG, "page offset"),
	member_def(perf_record_mmap, filename, T_STRING_INPLACE, "backing store"),
	{ .name = NULL, },
};

static PyObject *pyrf_mmap_event__repr(struct pyrf_event *pevent)
{
	PyObject *ret;
	char *s;

	if (asprintf(&s, "{ type: mmap, pid: %u, tid: %u, start: %#" PRI_lx64 ", "
			 "length: %#" PRI_lx64 ", offset: %#" PRI_lx64 ", "
			 "filename: %s }",
		     pevent->event.mmap.pid, pevent->event.mmap.tid,
		     pevent->event.mmap.start, pevent->event.mmap.len,
		     pevent->event.mmap.pgoff, pevent->event.mmap.filename) < 0) {
		ret = PyErr_NoMemory();
	} else {
		ret = _PyUnicode_FromString(s);
		free(s);
	}
	return ret;
}

static PyTypeObject pyrf_mmap_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.mmap_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_mmap_event__doc,
	.tp_members = pyrf_mmap_event__members,
	.tp_repr = (reprfunc)pyrf_mmap_event__repr,
};

static char pyrf_task_event__doc[] = PyDoc_STR("perf task (fork/exit) event object.");

static PyMemberDef pyrf_task_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_fork, pid, T_UINT, "event pid"),
	member_def(perf_record_fork, ppid, T_UINT, "event ppid"),
	member_def(perf_record_fork, tid, T_UINT, "event tid"),
	member_def(perf_record_fork, ptid, T_UINT, "event ptid"),
	member_def(perf_record_fork, time, T_ULONGLONG, "timestamp"),
	{ .name = NULL, },
};

static PyObject *pyrf_task_event__repr(struct pyrf_event *pevent)
{
	return _PyUnicode_FromFormat("{ type: %s, pid: %u, ppid: %u, tid: %u, "
				     "ptid: %u, time: %" PRI_lu64 "}",
				     pevent->event.header.type == PERF_RECORD_FORK ? "fork" : "exit",
				     pevent->event.fork.pid,
				     pevent->event.fork.ppid,
				     pevent->event.fork.tid,
				     pevent->event.fork.ptid,
				     pevent->event.fork.time);
}

static PyTypeObject pyrf_task_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.task_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_task_event__doc,
	.tp_members = pyrf_task_event__members,
	.tp_repr = (reprfunc)pyrf_task_event__repr,
};

static char pyrf_comm_event__doc[] = PyDoc_STR("perf comm event object.");

static PyMemberDef pyrf_comm_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_comm, pid, T_UINT, "event pid"),
	member_def(perf_record_comm, tid, T_UINT, "event tid"),
	member_def(perf_record_comm, comm, T_STRING_INPLACE, "process name"),
	{ .name = NULL, },
};

static PyObject *pyrf_comm_event__repr(struct pyrf_event *pevent)
{
	return _PyUnicode_FromFormat("{ type: comm, pid: %u, tid: %u, comm: %s }",
				     pevent->event.comm.pid,
				     pevent->event.comm.tid,
				     pevent->event.comm.comm);
}

static PyTypeObject pyrf_comm_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.comm_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_comm_event__doc,
	.tp_members = pyrf_comm_event__members,
	.tp_repr = (reprfunc)pyrf_comm_event__repr,
};

static char pyrf_throttle_event__doc[] = PyDoc_STR("perf throttle event object.");

static PyMemberDef pyrf_throttle_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_throttle, time, T_ULONGLONG, "timestamp"),
	member_def(perf_record_throttle, id, T_ULONGLONG, "event id"),
	member_def(perf_record_throttle, stream_id, T_ULONGLONG, "event stream id"),
	{ .name = NULL, },
};

static PyObject *pyrf_throttle_event__repr(struct pyrf_event *pevent)
{
	struct perf_record_throttle *te =
		(struct perf_record_throttle *)(&pevent->event.header + 1);

	return _PyUnicode_FromFormat("{ type: %sthrottle, time: %" PRI_lu64 ", id: %" PRI_lu64
				     ", stream_id: %" PRI_lu64 " }",
				     pevent->event.header.type == PERF_RECORD_THROTTLE ? "" : "un",
				     te->time, te->id, te->stream_id);
}

static PyTypeObject pyrf_throttle_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.throttle_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_throttle_event__doc,
	.tp_members = pyrf_throttle_event__members,
	.tp_repr = (reprfunc)pyrf_throttle_event__repr,
};

static char pyrf_lost_event__doc[] = PyDoc_STR("perf lost event object.");

static PyMemberDef pyrf_lost_event__members[] = {
	sample_members
	member_def(perf_record_lost, id, T_ULONGLONG, "event id"),
	member_def(perf_record_lost, lost, T_ULONGLONG, "number of lost events"),
	{ .name = NULL, },
};

static PyObject *pyrf_lost_event__repr(struct pyrf_event *pevent)
{
	PyObject *ret;
	char *s;

	if (asprintf(&s, "{ type: lost, id: %#" PRI_lx64 ", "
			 "lost: %#" PRI_lx64 " }",
		     pevent->event.lost.id, pevent->event.lost.lost) < 0) {
		ret = PyErr_NoMemory();
	} else {
		ret = _PyUnicode_FromString(s);
		free(s);
	}
	return ret;
}

static PyTypeObject pyrf_lost_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.lost_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_lost_event__doc,
	.tp_members = pyrf_lost_event__members,
	.tp_repr = (reprfunc)pyrf_lost_event__repr,
};

static char pyrf_read_event__doc[] = PyDoc_STR("perf read event object.");

static PyMemberDef pyrf_read_event__members[] = {
	sample_members
	member_def(perf_record_read, pid, T_UINT, "event pid"),
	member_def(perf_record_read, tid, T_UINT, "event tid"),
	{ .name = NULL, },
};

static PyObject *pyrf_read_event__repr(struct pyrf_event *pevent)
{
	return _PyUnicode_FromFormat("{ type: read, pid: %u, tid: %u }",
				     pevent->event.read.pid,
				     pevent->event.read.tid);
	/*
	 * FIXME: return the array of read values,
	 * making this method useful ;-)
	 */
}

static PyTypeObject pyrf_read_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.read_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_read_event__doc,
	.tp_members = pyrf_read_event__members,
	.tp_repr = (reprfunc)pyrf_read_event__repr,
};

static char pyrf_sample_event__doc[] = PyDoc_STR("perf sample event object.");

static PyMemberDef pyrf_sample_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	{ .name = NULL, },
};

static PyObject *pyrf_sample_event__repr(struct pyrf_event *pevent)
{
	PyObject *ret;
	char *s;

	if (asprintf(&s, "{ type: sample }") < 0) {
		ret = PyErr_NoMemory();
	} else {
		ret = _PyUnicode_FromString(s);
		free(s);
	}
	return ret;
}

#ifdef HAVE_LIBTRACEEVENT
static bool is_tracepoint(struct pyrf_event *pevent)
{
	return pevent->evsel->core.attr.type == PERF_TYPE_TRACEPOINT;
}

static PyObject*
tracepoint_field(struct pyrf_event *pe, struct tep_format_field *field)
{
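	/*
	 * Decode a single tracepoint field from the raw sample payload using
	 * libtraceevent: array/dynamic fields are resolved to an offset+length
	 * pair (printable strings become Python str, other arrays become
	 * bytearray), while plain numeric fields are read with
	 * tep_read_number() and returned as Python integers.
	 */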
	struct tep_handle *pevent = field->event->tep;
	void *data = pe->sample.raw_data;
	PyObject *ret = NULL;
	unsigned long long val;
	unsigned int offset, len;

	if (field->flags & TEP_FIELD_IS_ARRAY) {
		offset = field->offset;
		len = field->size;
		if (field->flags & TEP_FIELD_IS_DYNAMIC) {
			val = tep_read_number(pevent, data + offset, len);
			offset = val;
			len = offset >> 16;
			offset &= 0xffff;
			if (tep_field_is_relative(field->flags))
				offset += field->offset + field->size;
		}
		if (field->flags & TEP_FIELD_IS_STRING &&
		    is_printable_array(data + offset, len)) {
			ret = _PyUnicode_FromString((char *)data + offset);
		} else {
			ret = PyByteArray_FromStringAndSize((const char *) data + offset, len);
			field->flags &= ~TEP_FIELD_IS_STRING;
		}
	} else {
		val = tep_read_number(pevent, data + field->offset,
				      field->size);
		if (field->flags & TEP_FIELD_IS_POINTER)
			ret = PyLong_FromUnsignedLong((unsigned long) val);
		else if (field->flags & TEP_FIELD_IS_SIGNED)
			ret = PyLong_FromLong((long) val);
		else
			ret = PyLong_FromUnsignedLong((unsigned long) val);
	}

	return ret;
}

static PyObject*
get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name)
{
	const char *str = _PyUnicode_AsString(PyObject_Str(attr_name));
	struct evsel *evsel = pevent->evsel;
	struct tep_format_field *field;

	if (!evsel->tp_format) {
		struct tep_event *tp_format;

		tp_format = trace_event__tp_format_id(evsel->core.attr.config);
		if (IS_ERR_OR_NULL(tp_format))
			return NULL;

		evsel->tp_format = tp_format;
	}

	field = tep_find_any_field(evsel->tp_format, str);
	if (!field)
		return NULL;

	return tracepoint_field(pevent, field);
}
#endif /* HAVE_LIBTRACEEVENT */

static PyObject*
pyrf_sample_event__getattro(struct pyrf_event *pevent, PyObject *attr_name)
{
	PyObject *obj = NULL;

#ifdef HAVE_LIBTRACEEVENT
	if (is_tracepoint(pevent))
		obj = get_tracepoint_field(pevent, attr_name);
#endif

	return obj ?: PyObject_GenericGetAttr((PyObject *) pevent, attr_name);
}

static PyTypeObject pyrf_sample_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.sample_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_sample_event__doc,
	.tp_members = pyrf_sample_event__members,
	.tp_repr = (reprfunc)pyrf_sample_event__repr,
	.tp_getattro = (getattrofunc) pyrf_sample_event__getattro,
};

static char pyrf_context_switch_event__doc[] = PyDoc_STR("perf context_switch event object.");

static PyMemberDef pyrf_context_switch_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_switch, next_prev_pid, T_UINT, "next/prev pid"),
	member_def(perf_record_switch, next_prev_tid, T_UINT, "next/prev tid"),
	{ .name = NULL, },
};

static PyObject *pyrf_context_switch_event__repr(struct pyrf_event *pevent)
{
	PyObject *ret;
	char *s;

	if (asprintf(&s, "{ type: context_switch, next_prev_pid: %u, next_prev_tid: %u, switch_out: %u }",
		     pevent->event.context_switch.next_prev_pid,
		     pevent->event.context_switch.next_prev_tid,
		     !!(pevent->event.header.misc & PERF_RECORD_MISC_SWITCH_OUT)) < 0) {
		ret = PyErr_NoMemory();
	} else {
		ret = _PyUnicode_FromString(s);
		free(s);
	}
	return ret;
}

static PyTypeObject pyrf_context_switch_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.context_switch_event",
	.tp_basicsize = sizeof(struct pyrf_event),
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_context_switch_event__doc,
	.tp_members = pyrf_context_switch_event__members,
	.tp_repr = (reprfunc)pyrf_context_switch_event__repr,
};

static int pyrf_event__setup_types(void)
{
	int err;
	pyrf_mmap_event__type.tp_new =
	pyrf_task_event__type.tp_new =
	pyrf_comm_event__type.tp_new =
	pyrf_lost_event__type.tp_new =
	pyrf_read_event__type.tp_new =
	pyrf_sample_event__type.tp_new =
	pyrf_context_switch_event__type.tp_new =
	pyrf_throttle_event__type.tp_new = PyType_GenericNew;
	err = PyType_Ready(&pyrf_mmap_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_lost_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_task_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_comm_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_throttle_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_read_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_sample_event__type);
	if (err < 0)
		goto out;
	err = PyType_Ready(&pyrf_context_switch_event__type);
	if (err < 0)
		goto out;
out:
	return err;
}

static PyTypeObject *pyrf_event__type[] = {
	[PERF_RECORD_MMAP] = &pyrf_mmap_event__type,
	[PERF_RECORD_LOST] = &pyrf_lost_event__type,
	[PERF_RECORD_COMM] = &pyrf_comm_event__type,
	[PERF_RECORD_EXIT] = &pyrf_task_event__type,
	[PERF_RECORD_THROTTLE] = &pyrf_throttle_event__type,
	[PERF_RECORD_UNTHROTTLE] = &pyrf_throttle_event__type,
	[PERF_RECORD_FORK] = &pyrf_task_event__type,
	[PERF_RECORD_READ] = &pyrf_read_event__type,
	[PERF_RECORD_SAMPLE] = &pyrf_sample_event__type,
	[PERF_RECORD_SWITCH] = &pyrf_context_switch_event__type,
	[PERF_RECORD_SWITCH_CPU_WIDE] = &pyrf_context_switch_event__type,
};

static PyObject *pyrf_event__new(union perf_event *event)
{
	struct pyrf_event *pevent;
	PyTypeObject *ptype;

	if ((event->header.type < PERF_RECORD_MMAP ||
	     event->header.type > PERF_RECORD_SAMPLE) &&
	    !(event->header.type == PERF_RECORD_SWITCH ||
	      event->header.type == PERF_RECORD_SWITCH_CPU_WIDE))
		return NULL;

	ptype = pyrf_event__type[event->header.type];
	pevent = PyObject_New(struct pyrf_event, ptype);
	if (pevent != NULL)
		memcpy(&pevent->event, event, event->header.size);
	return (PyObject *)pevent;
}

struct pyrf_cpu_map {
	PyObject_HEAD

	struct perf_cpu_map *cpus;
};

static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus,
			      PyObject *args, PyObject *kwargs)
{
	static char *kwlist[] = { "cpustr", NULL };
	char *cpustr = NULL;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|s",
					 kwlist, &cpustr))
		return -1;

	pcpus->cpus = perf_cpu_map__new(cpustr);
	if (pcpus->cpus == NULL)
		return -1;
	return 0;
}

static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus)
{
	perf_cpu_map__put(pcpus->cpus);
	Py_TYPE(pcpus)->tp_free((PyObject*)pcpus);
}

static Py_ssize_t pyrf_cpu_map__length(PyObject *obj)
{
	struct pyrf_cpu_map *pcpus = (void *)obj;

	return perf_cpu_map__nr(pcpus->cpus);
}
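
/*
 * pyrf_cpu_map__length() above and pyrf_cpu_map__item() below back the
 * sq_length/sq_item slots in pyrf_cpu_map__sequence_methods, which is what
 * makes a perf.cpu_map usable as a read-only sequence from Python, e.g.
 * (illustrative only):
 *
 *	cpus = perf.cpu_map("0-3")
 *	len(cpus), cpus[0], [cpu for cpu in cpus]
 */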

static PyObject *pyrf_cpu_map__item(PyObject *obj, Py_ssize_t i)
{
	struct pyrf_cpu_map *pcpus = (void *)obj;

	if (i >= perf_cpu_map__nr(pcpus->cpus))
		return NULL;

	return Py_BuildValue("i", perf_cpu_map__cpu(pcpus->cpus, i).cpu);
}

static PySequenceMethods pyrf_cpu_map__sequence_methods = {
	.sq_length = pyrf_cpu_map__length,
	.sq_item = pyrf_cpu_map__item,
};

static char pyrf_cpu_map__doc[] = PyDoc_STR("cpu map object.");

static PyTypeObject pyrf_cpu_map__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.cpu_map",
	.tp_basicsize = sizeof(struct pyrf_cpu_map),
	.tp_dealloc = (destructor)pyrf_cpu_map__delete,
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_cpu_map__doc,
	.tp_as_sequence = &pyrf_cpu_map__sequence_methods,
	.tp_init = (initproc)pyrf_cpu_map__init,
};

static int pyrf_cpu_map__setup_types(void)
{
	pyrf_cpu_map__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_cpu_map__type);
}

struct pyrf_thread_map {
	PyObject_HEAD

	struct perf_thread_map *threads;
};

static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads,
				 PyObject *args, PyObject *kwargs)
{
	static char *kwlist[] = { "pid", "tid", "uid", NULL };
	int pid = -1, tid = -1, uid = UINT_MAX;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iii",
					 kwlist, &pid, &tid, &uid))
		return -1;

	pthreads->threads = thread_map__new(pid, tid, uid);
	if (pthreads->threads == NULL)
		return -1;
	return 0;
}

static void pyrf_thread_map__delete(struct pyrf_thread_map *pthreads)
{
	perf_thread_map__put(pthreads->threads);
	Py_TYPE(pthreads)->tp_free((PyObject*)pthreads);
}

static Py_ssize_t pyrf_thread_map__length(PyObject *obj)
{
	struct pyrf_thread_map *pthreads = (void *)obj;

	return perf_thread_map__nr(pthreads->threads);
}

static PyObject *pyrf_thread_map__item(PyObject *obj, Py_ssize_t i)
{
	struct pyrf_thread_map *pthreads = (void *)obj;

	if (i >= perf_thread_map__nr(pthreads->threads))
		return NULL;

	return Py_BuildValue("i", perf_thread_map__pid(pthreads->threads, i));
}

static PySequenceMethods pyrf_thread_map__sequence_methods = {
	.sq_length = pyrf_thread_map__length,
	.sq_item = pyrf_thread_map__item,
};

static char pyrf_thread_map__doc[] = PyDoc_STR("thread map object.");

static PyTypeObject pyrf_thread_map__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.thread_map",
	.tp_basicsize = sizeof(struct pyrf_thread_map),
	.tp_dealloc = (destructor)pyrf_thread_map__delete,
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_thread_map__doc,
	.tp_as_sequence = &pyrf_thread_map__sequence_methods,
	.tp_init = (initproc)pyrf_thread_map__init,
};

static int pyrf_thread_map__setup_types(void)
{
	pyrf_thread_map__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_thread_map__type);
}

struct pyrf_evsel {
	PyObject_HEAD

	struct evsel evsel;
};

static int pyrf_evsel__init(struct pyrf_evsel *pevsel,
			    PyObject *args, PyObject *kwargs)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
		.sample_type = PERF_SAMPLE_PERIOD | PERF_SAMPLE_TID,
	};
	static char *kwlist[] = {
		"type",
		"config",
"sample_freq", 786 "sample_period", 787 "sample_type", 788 "read_format", 789 "disabled", 790 "inherit", 791 "pinned", 792 "exclusive", 793 "exclude_user", 794 "exclude_kernel", 795 "exclude_hv", 796 "exclude_idle", 797 "mmap", 798 "context_switch", 799 "comm", 800 "freq", 801 "inherit_stat", 802 "enable_on_exec", 803 "task", 804 "watermark", 805 "precise_ip", 806 "mmap_data", 807 "sample_id_all", 808 "wakeup_events", 809 "bp_type", 810 "bp_addr", 811 "bp_len", 812 NULL 813 }; 814 u64 sample_period = 0; 815 u32 disabled = 0, 816 inherit = 0, 817 pinned = 0, 818 exclusive = 0, 819 exclude_user = 0, 820 exclude_kernel = 0, 821 exclude_hv = 0, 822 exclude_idle = 0, 823 mmap = 0, 824 context_switch = 0, 825 comm = 0, 826 freq = 1, 827 inherit_stat = 0, 828 enable_on_exec = 0, 829 task = 0, 830 watermark = 0, 831 precise_ip = 0, 832 mmap_data = 0, 833 sample_id_all = 1; 834 int idx = 0; 835 836 if (!PyArg_ParseTupleAndKeywords(args, kwargs, 837 "|iKiKKiiiiiiiiiiiiiiiiiiiiiiKK", kwlist, 838 &attr.type, &attr.config, &attr.sample_freq, 839 &sample_period, &attr.sample_type, 840 &attr.read_format, &disabled, &inherit, 841 &pinned, &exclusive, &exclude_user, 842 &exclude_kernel, &exclude_hv, &exclude_idle, 843 &mmap, &context_switch, &comm, &freq, &inherit_stat, 844 &enable_on_exec, &task, &watermark, 845 &precise_ip, &mmap_data, &sample_id_all, 846 &attr.wakeup_events, &attr.bp_type, 847 &attr.bp_addr, &attr.bp_len, &idx)) 848 return -1; 849 850 /* union... */ 851 if (sample_period != 0) { 852 if (attr.sample_freq != 0) 853 return -1; /* FIXME: throw right exception */ 854 attr.sample_period = sample_period; 855 } 856 857 /* Bitfields */ 858 attr.disabled = disabled; 859 attr.inherit = inherit; 860 attr.pinned = pinned; 861 attr.exclusive = exclusive; 862 attr.exclude_user = exclude_user; 863 attr.exclude_kernel = exclude_kernel; 864 attr.exclude_hv = exclude_hv; 865 attr.exclude_idle = exclude_idle; 866 attr.mmap = mmap; 867 attr.context_switch = context_switch; 868 attr.comm = comm; 869 attr.freq = freq; 870 attr.inherit_stat = inherit_stat; 871 attr.enable_on_exec = enable_on_exec; 872 attr.task = task; 873 attr.watermark = watermark; 874 attr.precise_ip = precise_ip; 875 attr.mmap_data = mmap_data; 876 attr.sample_id_all = sample_id_all; 877 attr.size = sizeof(attr); 878 879 evsel__init(&pevsel->evsel, &attr, idx); 880 return 0; 881 } 882 883 static void pyrf_evsel__delete(struct pyrf_evsel *pevsel) 884 { 885 evsel__exit(&pevsel->evsel); 886 Py_TYPE(pevsel)->tp_free((PyObject*)pevsel); 887 } 888 889 static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel, 890 PyObject *args, PyObject *kwargs) 891 { 892 struct evsel *evsel = &pevsel->evsel; 893 struct perf_cpu_map *cpus = NULL; 894 struct perf_thread_map *threads = NULL; 895 PyObject *pcpus = NULL, *pthreads = NULL; 896 int group = 0, inherit = 0; 897 static char *kwlist[] = { "cpus", "threads", "group", "inherit", NULL }; 898 899 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist, 900 &pcpus, &pthreads, &group, &inherit)) 901 return NULL; 902 903 if (pthreads != NULL) 904 threads = ((struct pyrf_thread_map *)pthreads)->threads; 905 906 if (pcpus != NULL) 907 cpus = ((struct pyrf_cpu_map *)pcpus)->cpus; 908 909 evsel->core.attr.inherit = inherit; 910 /* 911 * This will group just the fds for this single evsel, to group 912 * multiple events, use evlist.open(). 
	 */
	if (evsel__open(evsel, cpus, threads) < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	Py_INCREF(Py_None);
	return Py_None;
}

static PyMethodDef pyrf_evsel__methods[] = {
	{
		.ml_name = "open",
		.ml_meth = (PyCFunction)pyrf_evsel__open,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc = PyDoc_STR("open the event selector file descriptor table.")
	},
	{ .ml_name = NULL, }
};

static char pyrf_evsel__doc[] = PyDoc_STR("perf event selector object.");

static PyTypeObject pyrf_evsel__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.evsel",
	.tp_basicsize = sizeof(struct pyrf_evsel),
	.tp_dealloc = (destructor)pyrf_evsel__delete,
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc = pyrf_evsel__doc,
	.tp_methods = pyrf_evsel__methods,
	.tp_init = (initproc)pyrf_evsel__init,
};

static int pyrf_evsel__setup_types(void)
{
	pyrf_evsel__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_evsel__type);
}

struct pyrf_evlist {
	PyObject_HEAD

	struct evlist evlist;
};

static int pyrf_evlist__init(struct pyrf_evlist *pevlist,
			     PyObject *args, PyObject *kwargs __maybe_unused)
{
	PyObject *pcpus = NULL, *pthreads = NULL;
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;

	if (!PyArg_ParseTuple(args, "OO", &pcpus, &pthreads))
		return -1;

	threads = ((struct pyrf_thread_map *)pthreads)->threads;
	cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
	evlist__init(&pevlist->evlist, cpus, threads);
	return 0;
}

static void pyrf_evlist__delete(struct pyrf_evlist *pevlist)
{
	evlist__exit(&pevlist->evlist);
	Py_TYPE(pevlist)->tp_free((PyObject*)pevlist);
}

static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist,
				   PyObject *args, PyObject *kwargs)
{
	struct evlist *evlist = &pevlist->evlist;
	static char *kwlist[] = { "pages", "overwrite", NULL };
	int pages = 128, overwrite = false;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", kwlist,
					 &pages, &overwrite))
		return NULL;

	if (evlist__mmap(evlist, pages) < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	Py_INCREF(Py_None);
	return Py_None;
}

static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist,
				   PyObject *args, PyObject *kwargs)
{
	struct evlist *evlist = &pevlist->evlist;
	static char *kwlist[] = { "timeout", NULL };
	int timeout = -1, n;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout))
		return NULL;

	n = evlist__poll(evlist, timeout);
	if (n < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	return Py_BuildValue("i", n);
}

static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
					 PyObject *args __maybe_unused,
					 PyObject *kwargs __maybe_unused)
{
	struct evlist *evlist = &pevlist->evlist;
	PyObject *list = PyList_New(0);
	int i;

	for (i = 0; i < evlist->core.pollfd.nr; ++i) {
		PyObject *file;
#if PY_MAJOR_VERSION < 3
		FILE *fp = fdopen(evlist->core.pollfd.entries[i].fd, "r");

		if (fp == NULL)
			goto free_list;

		file = PyFile_FromFile(fp, "perf", "r", NULL);
#else
		file = PyFile_FromFd(evlist->core.pollfd.entries[i].fd, "perf", "r", -1,
				     NULL, NULL, NULL, 0);
#endif
		if (file == NULL)
			goto free_list;

		if (PyList_Append(list, file) != 0) {
			Py_DECREF(file);
			goto free_list;
		}

		Py_DECREF(file);
	}

	return list;
free_list:
	return PyErr_NoMemory();
}

static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist,
				  PyObject *args,
				  PyObject *kwargs __maybe_unused)
{
	struct evlist *evlist = &pevlist->evlist;
	PyObject *pevsel;
	struct evsel *evsel;

	if (!PyArg_ParseTuple(args, "O", &pevsel))
		return NULL;

	Py_INCREF(pevsel);
	evsel = &((struct pyrf_evsel *)pevsel)->evsel;
	evsel->core.idx = evlist->core.nr_entries;
	evlist__add(evlist, evsel);

	return Py_BuildValue("i", evlist->core.nr_entries);
}

static struct mmap *get_md(struct evlist *evlist, int cpu)
{
	int i;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		struct mmap *md = &evlist->mmap[i];

		if (md->core.cpu.cpu == cpu)
			return md;
	}

	return NULL;
}

static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
					  PyObject *args, PyObject *kwargs)
{
	struct evlist *evlist = &pevlist->evlist;
	union perf_event *event;
	int sample_id_all = 1, cpu;
	static char *kwlist[] = { "cpu", "sample_id_all", NULL };
	struct mmap *md;
	int err;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist,
					 &cpu, &sample_id_all))
		return NULL;

	md = get_md(evlist, cpu);
	if (!md)
		return NULL;

	if (perf_mmap__read_init(&md->core) < 0)
		goto end;

	event = perf_mmap__read_event(&md->core);
	if (event != NULL) {
		PyObject *pyevent = pyrf_event__new(event);
		struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
		struct evsel *evsel;

		if (pyevent == NULL)
			return PyErr_NoMemory();

		evsel = evlist__event2evsel(evlist, event);
		if (!evsel) {
			Py_INCREF(Py_None);
			return Py_None;
		}

		pevent->evsel = evsel;

		err = evsel__parse_sample(evsel, event, &pevent->sample);

		/* Consume the event only after we parsed it out.
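		 * The record returned by perf_mmap__read_event() still lives
		 * in the ring buffer; perf_mmap__consume() marks that space as
		 * reusable by the kernel, so all parsing has to happen first.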
		 */
		perf_mmap__consume(&md->core);

		if (err)
			return PyErr_Format(PyExc_OSError,
					    "perf: can't parse sample, err=%d", err);
		return pyevent;
	}
end:
	Py_INCREF(Py_None);
	return Py_None;
}

static PyObject *pyrf_evlist__open(struct pyrf_evlist *pevlist,
				   PyObject *args, PyObject *kwargs)
{
	struct evlist *evlist = &pevlist->evlist;

	if (evlist__open(evlist) < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	Py_INCREF(Py_None);
	return Py_None;
}

static PyMethodDef pyrf_evlist__methods[] = {
	{
		.ml_name = "mmap",
		.ml_meth = (PyCFunction)pyrf_evlist__mmap,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc = PyDoc_STR("mmap the file descriptor table.")
	},
	{
		.ml_name = "open",
		.ml_meth = (PyCFunction)pyrf_evlist__open,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc = PyDoc_STR("open the file descriptors.")
	},
	{
		.ml_name = "poll",
		.ml_meth = (PyCFunction)pyrf_evlist__poll,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc = PyDoc_STR("poll the file descriptor table.")
	},
	{
		.ml_name = "get_pollfd",
		.ml_meth = (PyCFunction)pyrf_evlist__get_pollfd,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc = PyDoc_STR("get the poll file descriptor table.")
	},
	{
		.ml_name = "add",
		.ml_meth = (PyCFunction)pyrf_evlist__add,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc = PyDoc_STR("adds an event selector to the list.")
	},
	{
		.ml_name = "read_on_cpu",
		.ml_meth = (PyCFunction)pyrf_evlist__read_on_cpu,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc = PyDoc_STR("reads an event.")
	},
	{ .ml_name = NULL, }
};

static Py_ssize_t pyrf_evlist__length(PyObject *obj)
{
	struct pyrf_evlist *pevlist = (void *)obj;

	return pevlist->evlist.core.nr_entries;
}

static PyObject *pyrf_evlist__item(PyObject *obj, Py_ssize_t i)
{
	struct pyrf_evlist *pevlist = (void *)obj;
	struct evsel *pos;

	if (i >= pevlist->evlist.core.nr_entries)
		return NULL;

	evlist__for_each_entry(&pevlist->evlist, pos) {
		if (i-- == 0)
			break;
	}

	return Py_BuildValue("O", container_of(pos, struct pyrf_evsel, evsel));
}

static PySequenceMethods pyrf_evlist__sequence_methods = {
	.sq_length = pyrf_evlist__length,
	.sq_item = pyrf_evlist__item,
};

static char pyrf_evlist__doc[] = PyDoc_STR("perf event selector list object.");

static PyTypeObject pyrf_evlist__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name = "perf.evlist",
	.tp_basicsize = sizeof(struct pyrf_evlist),
	.tp_dealloc = (destructor)pyrf_evlist__delete,
	.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_as_sequence = &pyrf_evlist__sequence_methods,
	.tp_doc = pyrf_evlist__doc,
	.tp_methods = pyrf_evlist__methods,
	.tp_init = (initproc)pyrf_evlist__init,
};

static int pyrf_evlist__setup_types(void)
{
	pyrf_evlist__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_evlist__type);
}

#define PERF_CONST(name) { #name, PERF_##name }

static struct {
	const char *name;
	int value;
} perf__constants[] = {
	PERF_CONST(TYPE_HARDWARE),
	PERF_CONST(TYPE_SOFTWARE),
	PERF_CONST(TYPE_TRACEPOINT),
	PERF_CONST(TYPE_HW_CACHE),
	PERF_CONST(TYPE_RAW),
	PERF_CONST(TYPE_BREAKPOINT),

	PERF_CONST(COUNT_HW_CPU_CYCLES),
	PERF_CONST(COUNT_HW_INSTRUCTIONS),
	PERF_CONST(COUNT_HW_CACHE_REFERENCES),
	PERF_CONST(COUNT_HW_CACHE_MISSES),
	PERF_CONST(COUNT_HW_BRANCH_INSTRUCTIONS),
	PERF_CONST(COUNT_HW_BRANCH_MISSES),
	PERF_CONST(COUNT_HW_BUS_CYCLES),
	PERF_CONST(COUNT_HW_CACHE_L1D),
	PERF_CONST(COUNT_HW_CACHE_L1I),
	PERF_CONST(COUNT_HW_CACHE_LL),
	PERF_CONST(COUNT_HW_CACHE_DTLB),
	PERF_CONST(COUNT_HW_CACHE_ITLB),
	PERF_CONST(COUNT_HW_CACHE_BPU),
	PERF_CONST(COUNT_HW_CACHE_OP_READ),
	PERF_CONST(COUNT_HW_CACHE_OP_WRITE),
	PERF_CONST(COUNT_HW_CACHE_OP_PREFETCH),
	PERF_CONST(COUNT_HW_CACHE_RESULT_ACCESS),
	PERF_CONST(COUNT_HW_CACHE_RESULT_MISS),

	PERF_CONST(COUNT_HW_STALLED_CYCLES_FRONTEND),
	PERF_CONST(COUNT_HW_STALLED_CYCLES_BACKEND),

	PERF_CONST(COUNT_SW_CPU_CLOCK),
	PERF_CONST(COUNT_SW_TASK_CLOCK),
	PERF_CONST(COUNT_SW_PAGE_FAULTS),
	PERF_CONST(COUNT_SW_CONTEXT_SWITCHES),
	PERF_CONST(COUNT_SW_CPU_MIGRATIONS),
	PERF_CONST(COUNT_SW_PAGE_FAULTS_MIN),
	PERF_CONST(COUNT_SW_PAGE_FAULTS_MAJ),
	PERF_CONST(COUNT_SW_ALIGNMENT_FAULTS),
	PERF_CONST(COUNT_SW_EMULATION_FAULTS),
	PERF_CONST(COUNT_SW_DUMMY),

	PERF_CONST(SAMPLE_IP),
	PERF_CONST(SAMPLE_TID),
	PERF_CONST(SAMPLE_TIME),
	PERF_CONST(SAMPLE_ADDR),
	PERF_CONST(SAMPLE_READ),
	PERF_CONST(SAMPLE_CALLCHAIN),
	PERF_CONST(SAMPLE_ID),
	PERF_CONST(SAMPLE_CPU),
	PERF_CONST(SAMPLE_PERIOD),
	PERF_CONST(SAMPLE_STREAM_ID),
	PERF_CONST(SAMPLE_RAW),

	PERF_CONST(FORMAT_TOTAL_TIME_ENABLED),
	PERF_CONST(FORMAT_TOTAL_TIME_RUNNING),
	PERF_CONST(FORMAT_ID),
	PERF_CONST(FORMAT_GROUP),

	PERF_CONST(RECORD_MMAP),
	PERF_CONST(RECORD_LOST),
	PERF_CONST(RECORD_COMM),
	PERF_CONST(RECORD_EXIT),
	PERF_CONST(RECORD_THROTTLE),
	PERF_CONST(RECORD_UNTHROTTLE),
	PERF_CONST(RECORD_FORK),
	PERF_CONST(RECORD_READ),
	PERF_CONST(RECORD_SAMPLE),
	PERF_CONST(RECORD_MMAP2),
	PERF_CONST(RECORD_AUX),
	PERF_CONST(RECORD_ITRACE_START),
	PERF_CONST(RECORD_LOST_SAMPLES),
	PERF_CONST(RECORD_SWITCH),
	PERF_CONST(RECORD_SWITCH_CPU_WIDE),

	PERF_CONST(RECORD_MISC_SWITCH_OUT),
	{ .name = NULL, },
};

static PyObject *pyrf__tracepoint(struct pyrf_evsel *pevsel,
				  PyObject *args, PyObject *kwargs)
{
#ifndef HAVE_LIBTRACEEVENT
	return NULL;
#else
	struct tep_event *tp_format;
	static char *kwlist[] = { "sys", "name", NULL };
	char *sys = NULL;
	char *name = NULL;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ss", kwlist,
					 &sys, &name))
		return NULL;

	tp_format = trace_event__tp_format(sys, name);
	if (IS_ERR(tp_format))
		return _PyLong_FromLong(-1);

	return _PyLong_FromLong(tp_format->id);
#endif // HAVE_LIBTRACEEVENT
}

static PyMethodDef perf__methods[] = {
	{
		.ml_name = "tracepoint",
		.ml_meth = (PyCFunction) pyrf__tracepoint,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc = PyDoc_STR("Get tracepoint config.")
	},
	{ .ml_name = NULL, }
};

#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC initperf(void)
#else
PyMODINIT_FUNC PyInit_perf(void)
#endif
{
	PyObject *obj;
	int i;
	PyObject *dict;
#if PY_MAJOR_VERSION < 3
	PyObject *module = Py_InitModule("perf", perf__methods);
#else
	static struct PyModuleDef moduledef = {
		PyModuleDef_HEAD_INIT,
		"perf",			/* m_name */
		"",			/* m_doc */
		-1,			/* m_size */
		perf__methods,		/* m_methods */
		NULL,			/* m_reload */
		NULL,			/* m_traverse */
		NULL,			/* m_clear */
		NULL,			/* m_free */
	};
	PyObject *module = PyModule_Create(&moduledef);
#endif

	if (module == NULL ||
	    pyrf_event__setup_types() < 0 ||
	    pyrf_evlist__setup_types() < 0 ||
	    pyrf_evsel__setup_types() < 0 ||
	    pyrf_thread_map__setup_types() < 0 ||
	    pyrf_cpu_map__setup_types() < 0)
#if PY_MAJOR_VERSION < 3
		return;
#else
		return module;
#endif

	/* The page_size variable is defined in the util object, set it here. */
	page_size = sysconf(_SC_PAGE_SIZE);

	Py_INCREF(&pyrf_evlist__type);
	PyModule_AddObject(module, "evlist", (PyObject*)&pyrf_evlist__type);

	Py_INCREF(&pyrf_evsel__type);
	PyModule_AddObject(module, "evsel", (PyObject*)&pyrf_evsel__type);

	Py_INCREF(&pyrf_mmap_event__type);
	PyModule_AddObject(module, "mmap_event", (PyObject *)&pyrf_mmap_event__type);

	Py_INCREF(&pyrf_lost_event__type);
	PyModule_AddObject(module, "lost_event", (PyObject *)&pyrf_lost_event__type);

	Py_INCREF(&pyrf_comm_event__type);
	PyModule_AddObject(module, "comm_event", (PyObject *)&pyrf_comm_event__type);

	Py_INCREF(&pyrf_task_event__type);
	PyModule_AddObject(module, "task_event", (PyObject *)&pyrf_task_event__type);

	Py_INCREF(&pyrf_throttle_event__type);
	PyModule_AddObject(module, "throttle_event", (PyObject *)&pyrf_throttle_event__type);

	Py_INCREF(&pyrf_read_event__type);
	PyModule_AddObject(module, "read_event", (PyObject *)&pyrf_read_event__type);

	Py_INCREF(&pyrf_sample_event__type);
	PyModule_AddObject(module, "sample_event", (PyObject *)&pyrf_sample_event__type);

	Py_INCREF(&pyrf_context_switch_event__type);
	PyModule_AddObject(module, "switch_event", (PyObject *)&pyrf_context_switch_event__type);

	Py_INCREF(&pyrf_thread_map__type);
	PyModule_AddObject(module, "thread_map", (PyObject*)&pyrf_thread_map__type);

	Py_INCREF(&pyrf_cpu_map__type);
	PyModule_AddObject(module, "cpu_map", (PyObject*)&pyrf_cpu_map__type);

	dict = PyModule_GetDict(module);
	if (dict == NULL)
		goto error;

	for (i = 0; perf__constants[i].name != NULL; i++) {
		obj = _PyLong_FromLong(perf__constants[i].value);
		if (obj == NULL)
			goto error;
		PyDict_SetItemString(dict, perf__constants[i].name, obj);
		Py_DECREF(obj);
	}

error:
	if (PyErr_Occurred())
		PyErr_SetString(PyExc_ImportError, "perf: Init failed!");
#if PY_MAJOR_VERSION >= 3
	return module;
#endif
}

/*
 * Dummy, to avoid dragging all the test_attr infrastructure into the python
 * binding.
 */
void test_attr__open(struct perf_event_attr *attr, pid_t pid, struct perf_cpu cpu,
		     int fd, int group_fd, unsigned long flags)
{
}
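
/*
 * Minimal, illustrative use of this binding from Python; this is only a
 * sketch built from the types and methods defined above (complete examples
 * live under tools/perf/python/ in the perf sources):
 *
 *	import perf
 *
 *	cpus    = perf.cpu_map()
 *	threads = perf.thread_map(-1)
 *	evsel   = perf.evsel(type = perf.TYPE_SOFTWARE,
 *			     config = perf.COUNT_SW_DUMMY,
 *			     task = 1, comm = 1, wakeup_events = 1,
 *			     watermark = 1, sample_id_all = 1)
 *	evsel.open(cpus = cpus, threads = threads)
 *
 *	evlist = perf.evlist(cpus, threads)
 *	evlist.add(evsel)
 *	evlist.mmap()
 *	while True:
 *		evlist.poll(timeout = -1)
 *		for cpu in cpus:
 *			event = evlist.read_on_cpu(cpu)
 *			if event:
 *				print(event)
 */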