1 // SPDX-License-Identifier: GPL-2.0 2 #include <Python.h> 3 #include <structmember.h> 4 #include <inttypes.h> 5 #include <poll.h> 6 #include <linux/err.h> 7 #include <perf/cpumap.h> 8 #ifdef HAVE_LIBTRACEEVENT 9 #include <event-parse.h> 10 #endif 11 #include <perf/mmap.h> 12 #include "callchain.h" 13 #include "counts.h" 14 #include "evlist.h" 15 #include "evsel.h" 16 #include "event.h" 17 #include "expr.h" 18 #include "print_binary.h" 19 #include "record.h" 20 #include "strbuf.h" 21 #include "thread_map.h" 22 #include "tp_pmu.h" 23 #include "trace-event.h" 24 #include "metricgroup.h" 25 #include "mmap.h" 26 #include "util/sample.h" 27 #include <internal/lib.h> 28 29 PyMODINIT_FUNC PyInit_perf(void); 30 31 #define member_def(type, member, ptype, help) \ 32 { #member, ptype, \ 33 offsetof(struct pyrf_event, event) + offsetof(struct type, member), \ 34 0, help } 35 36 #define sample_member_def(name, member, ptype, help) \ 37 { #name, ptype, \ 38 offsetof(struct pyrf_event, sample) + offsetof(struct perf_sample, member), \ 39 0, help } 40 41 struct pyrf_event { 42 PyObject_HEAD 43 struct evsel *evsel; 44 struct perf_sample sample; 45 union perf_event event; 46 }; 47 48 #define sample_members \ 49 sample_member_def(sample_ip, ip, T_ULONGLONG, "event ip"), \ 50 sample_member_def(sample_pid, pid, T_INT, "event pid"), \ 51 sample_member_def(sample_tid, tid, T_INT, "event tid"), \ 52 sample_member_def(sample_time, time, T_ULONGLONG, "event timestamp"), \ 53 sample_member_def(sample_addr, addr, T_ULONGLONG, "event addr"), \ 54 sample_member_def(sample_id, id, T_ULONGLONG, "event id"), \ 55 sample_member_def(sample_stream_id, stream_id, T_ULONGLONG, "event stream id"), \ 56 sample_member_def(sample_period, period, T_ULONGLONG, "event period"), \ 57 sample_member_def(sample_cpu, cpu, T_UINT, "event cpu"), 58 59 static const char pyrf_mmap_event__doc[] = PyDoc_STR("perf mmap event object."); 60 61 static PyMemberDef pyrf_mmap_event__members[] = { 62 sample_members 63 member_def(perf_event_header, type, T_UINT, "event type"), 64 member_def(perf_event_header, misc, T_UINT, "event misc"), 65 member_def(perf_record_mmap, pid, T_UINT, "event pid"), 66 member_def(perf_record_mmap, tid, T_UINT, "event tid"), 67 member_def(perf_record_mmap, start, T_ULONGLONG, "start of the map"), 68 member_def(perf_record_mmap, len, T_ULONGLONG, "map length"), 69 member_def(perf_record_mmap, pgoff, T_ULONGLONG, "page offset"), 70 member_def(perf_record_mmap, filename, T_STRING_INPLACE, "backing store"), 71 { .name = NULL, }, 72 }; 73 74 static PyObject *pyrf_mmap_event__repr(const struct pyrf_event *pevent) 75 { 76 PyObject *ret; 77 char *s; 78 79 if (asprintf(&s, "{ type: mmap, pid: %u, tid: %u, start: %#" PRI_lx64 ", " 80 "length: %#" PRI_lx64 ", offset: %#" PRI_lx64 ", " 81 "filename: %s }", 82 pevent->event.mmap.pid, pevent->event.mmap.tid, 83 pevent->event.mmap.start, pevent->event.mmap.len, 84 pevent->event.mmap.pgoff, pevent->event.mmap.filename) < 0) { 85 ret = PyErr_NoMemory(); 86 } else { 87 ret = PyUnicode_FromString(s); 88 free(s); 89 } 90 return ret; 91 } 92 93 static PyTypeObject pyrf_mmap_event__type = { 94 PyVarObject_HEAD_INIT(NULL, 0) 95 .tp_name = "perf.mmap_event", 96 .tp_basicsize = sizeof(struct pyrf_event), 97 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, 98 .tp_doc = pyrf_mmap_event__doc, 99 .tp_members = pyrf_mmap_event__members, 100 .tp_repr = (reprfunc)pyrf_mmap_event__repr, 101 }; 102 103 static const char pyrf_task_event__doc[] = PyDoc_STR("perf task (fork/exit) event object."); 104 105 static PyMemberDef pyrf_task_event__members[] = { 106 sample_members 107 member_def(perf_event_header, type, T_UINT, "event type"), 108 member_def(perf_record_fork, pid, T_UINT, "event pid"), 109 member_def(perf_record_fork, ppid, T_UINT, "event ppid"), 110 member_def(perf_record_fork, tid, T_UINT, "event tid"), 111 member_def(perf_record_fork, ptid, T_UINT, "event ptid"), 112 member_def(perf_record_fork, time, T_ULONGLONG, "timestamp"), 113 { .name = NULL, }, 114 }; 115 116 static PyObject *pyrf_task_event__repr(const struct pyrf_event *pevent) 117 { 118 return PyUnicode_FromFormat("{ type: %s, pid: %u, ppid: %u, tid: %u, " 119 "ptid: %u, time: %" PRI_lu64 "}", 120 pevent->event.header.type == PERF_RECORD_FORK ? "fork" : "exit", 121 pevent->event.fork.pid, 122 pevent->event.fork.ppid, 123 pevent->event.fork.tid, 124 pevent->event.fork.ptid, 125 pevent->event.fork.time); 126 } 127 128 static PyTypeObject pyrf_task_event__type = { 129 PyVarObject_HEAD_INIT(NULL, 0) 130 .tp_name = "perf.task_event", 131 .tp_basicsize = sizeof(struct pyrf_event), 132 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, 133 .tp_doc = pyrf_task_event__doc, 134 .tp_members = pyrf_task_event__members, 135 .tp_repr = (reprfunc)pyrf_task_event__repr, 136 }; 137 138 static const char pyrf_comm_event__doc[] = PyDoc_STR("perf comm event object."); 139 140 static PyMemberDef pyrf_comm_event__members[] = { 141 sample_members 142 member_def(perf_event_header, type, T_UINT, "event type"), 143 member_def(perf_record_comm, pid, T_UINT, "event pid"), 144 member_def(perf_record_comm, tid, T_UINT, "event tid"), 145 member_def(perf_record_comm, comm, T_STRING_INPLACE, "process name"), 146 { .name = NULL, }, 147 }; 148 149 static PyObject *pyrf_comm_event__repr(const struct pyrf_event *pevent) 150 { 151 return PyUnicode_FromFormat("{ type: comm, pid: %u, tid: %u, comm: %s }", 152 pevent->event.comm.pid, 153 pevent->event.comm.tid, 154 pevent->event.comm.comm); 155 } 156 157 static PyTypeObject pyrf_comm_event__type = { 158 PyVarObject_HEAD_INIT(NULL, 0) 159 .tp_name = "perf.comm_event", 160 .tp_basicsize = sizeof(struct pyrf_event), 161 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, 162 .tp_doc = pyrf_comm_event__doc, 163 .tp_members = pyrf_comm_event__members, 164 .tp_repr = (reprfunc)pyrf_comm_event__repr, 165 }; 166 167 static const char pyrf_throttle_event__doc[] = PyDoc_STR("perf throttle event object."); 168 169 static PyMemberDef pyrf_throttle_event__members[] = { 170 sample_members 171 member_def(perf_event_header, type, T_UINT, "event type"), 172 member_def(perf_record_throttle, time, T_ULONGLONG, "timestamp"), 173 member_def(perf_record_throttle, id, T_ULONGLONG, "event id"), 174 member_def(perf_record_throttle, stream_id, T_ULONGLONG, "event stream id"), 175 { .name = NULL, }, 176 }; 177 178 static PyObject *pyrf_throttle_event__repr(const struct pyrf_event *pevent) 179 { 180 const struct perf_record_throttle *te = (const struct perf_record_throttle *) 181 (&pevent->event.header + 1); 182 183 return PyUnicode_FromFormat("{ type: %sthrottle, time: %" PRI_lu64 ", id: %" PRI_lu64 184 ", stream_id: %" PRI_lu64 " }", 185 pevent->event.header.type == PERF_RECORD_THROTTLE ? "" : "un", 186 te->time, te->id, te->stream_id); 187 } 188 189 static PyTypeObject pyrf_throttle_event__type = { 190 PyVarObject_HEAD_INIT(NULL, 0) 191 .tp_name = "perf.throttle_event", 192 .tp_basicsize = sizeof(struct pyrf_event), 193 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, 194 .tp_doc = pyrf_throttle_event__doc, 195 .tp_members = pyrf_throttle_event__members, 196 .tp_repr = (reprfunc)pyrf_throttle_event__repr, 197 }; 198 199 static const char pyrf_lost_event__doc[] = PyDoc_STR("perf lost event object."); 200 201 static PyMemberDef pyrf_lost_event__members[] = { 202 sample_members 203 member_def(perf_record_lost, id, T_ULONGLONG, "event id"), 204 member_def(perf_record_lost, lost, T_ULONGLONG, "number of lost events"), 205 { .name = NULL, }, 206 }; 207 208 static PyObject *pyrf_lost_event__repr(const struct pyrf_event *pevent) 209 { 210 PyObject *ret; 211 char *s; 212 213 if (asprintf(&s, "{ type: lost, id: %#" PRI_lx64 ", " 214 "lost: %#" PRI_lx64 " }", 215 pevent->event.lost.id, pevent->event.lost.lost) < 0) { 216 ret = PyErr_NoMemory(); 217 } else { 218 ret = PyUnicode_FromString(s); 219 free(s); 220 } 221 return ret; 222 } 223 224 static PyTypeObject pyrf_lost_event__type = { 225 PyVarObject_HEAD_INIT(NULL, 0) 226 .tp_name = "perf.lost_event", 227 .tp_basicsize = sizeof(struct pyrf_event), 228 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, 229 .tp_doc = pyrf_lost_event__doc, 230 .tp_members = pyrf_lost_event__members, 231 .tp_repr = (reprfunc)pyrf_lost_event__repr, 232 }; 233 234 static const char pyrf_read_event__doc[] = PyDoc_STR("perf read event object."); 235 236 static PyMemberDef pyrf_read_event__members[] = { 237 sample_members 238 member_def(perf_record_read, pid, T_UINT, "event pid"), 239 member_def(perf_record_read, tid, T_UINT, "event tid"), 240 { .name = NULL, }, 241 }; 242 243 static PyObject *pyrf_read_event__repr(const struct pyrf_event *pevent) 244 { 245 return PyUnicode_FromFormat("{ type: read, pid: %u, tid: %u }", 246 pevent->event.read.pid, 247 pevent->event.read.tid); 248 /* 249 * FIXME: return the array of read values, 250 * making this method useful ;-) 251 */ 252 } 253 254 static PyTypeObject pyrf_read_event__type = { 255 PyVarObject_HEAD_INIT(NULL, 0) 256 .tp_name = "perf.read_event", 257 .tp_basicsize = sizeof(struct pyrf_event), 258 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, 259 .tp_doc = pyrf_read_event__doc, 260 .tp_members = pyrf_read_event__members, 261 .tp_repr = (reprfunc)pyrf_read_event__repr, 262 }; 263 264 static const char pyrf_sample_event__doc[] = PyDoc_STR("perf sample event object."); 265 266 static PyMemberDef pyrf_sample_event__members[] = { 267 sample_members 268 member_def(perf_event_header, type, T_UINT, "event type"), 269 { .name = NULL, }, 270 }; 271 272 static void pyrf_sample_event__delete(struct pyrf_event *pevent) 273 { 274 perf_sample__exit(&pevent->sample); 275 Py_TYPE(pevent)->tp_free((PyObject*)pevent); 276 } 277 278 static PyObject *pyrf_sample_event__repr(const struct pyrf_event *pevent) 279 { 280 PyObject *ret; 281 char *s; 282 283 if (asprintf(&s, "{ type: sample }") < 0) { 284 ret = PyErr_NoMemory(); 285 } else { 286 ret = PyUnicode_FromString(s); 287 free(s); 288 } 289 return ret; 290 } 291 292 #ifdef HAVE_LIBTRACEEVENT 293 static bool is_tracepoint(const struct pyrf_event *pevent) 294 { 295 return pevent->evsel->core.attr.type == PERF_TYPE_TRACEPOINT; 296 } 297 298 static PyObject* 299 tracepoint_field(const struct pyrf_event *pe, struct tep_format_field *field) 300 { 301 struct tep_handle *pevent = field->event->tep; 302 void *data = pe->sample.raw_data; 303 PyObject *ret = NULL; 304 unsigned long long val; 305 unsigned int offset, len; 306 307 if (field->flags & TEP_FIELD_IS_ARRAY) { 308 offset = field->offset; 309 len = field->size; 310 if (field->flags & TEP_FIELD_IS_DYNAMIC) { 311 val = tep_read_number(pevent, data + offset, len); 312 offset = val; 313 len = offset >> 16; 314 offset &= 0xffff; 315 if (tep_field_is_relative(field->flags)) 316 offset += field->offset + field->size; 317 } 318 if (field->flags & TEP_FIELD_IS_STRING && 319 is_printable_array(data + offset, len)) { 320 ret = PyUnicode_FromString((char *)data + offset); 321 } else { 322 ret = PyByteArray_FromStringAndSize((const char *) data + offset, len); 323 field->flags &= ~TEP_FIELD_IS_STRING; 324 } 325 } else { 326 val = tep_read_number(pevent, data + field->offset, 327 field->size); 328 if (field->flags & TEP_FIELD_IS_POINTER) 329 ret = PyLong_FromUnsignedLong((unsigned long) val); 330 else if (field->flags & TEP_FIELD_IS_SIGNED) 331 ret = PyLong_FromLong((long) val); 332 else 333 ret = PyLong_FromUnsignedLong((unsigned long) val); 334 } 335 336 return ret; 337 } 338 339 static PyObject* 340 get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name) 341 { 342 struct evsel *evsel = pevent->evsel; 343 struct tep_event *tp_format = evsel__tp_format(evsel); 344 struct tep_format_field *field; 345 346 if (IS_ERR_OR_NULL(tp_format)) 347 return NULL; 348 349 PyObject *obj = PyObject_Str(attr_name); 350 if (obj == NULL) 351 return NULL; 352 353 const char *str = PyUnicode_AsUTF8(obj); 354 if (str == NULL) { 355 Py_DECREF(obj); 356 return NULL; 357 } 358 359 field = tep_find_any_field(tp_format, str); 360 Py_DECREF(obj); 361 return field ? tracepoint_field(pevent, field) : NULL; 362 } 363 #endif /* HAVE_LIBTRACEEVENT */ 364 365 static PyObject* 366 pyrf_sample_event__getattro(struct pyrf_event *pevent, PyObject *attr_name) 367 { 368 PyObject *obj = NULL; 369 370 #ifdef HAVE_LIBTRACEEVENT 371 if (is_tracepoint(pevent)) 372 obj = get_tracepoint_field(pevent, attr_name); 373 #endif 374 375 return obj ?: PyObject_GenericGetAttr((PyObject *) pevent, attr_name); 376 } 377 378 static PyTypeObject pyrf_sample_event__type = { 379 PyVarObject_HEAD_INIT(NULL, 0) 380 .tp_name = "perf.sample_event", 381 .tp_basicsize = sizeof(struct pyrf_event), 382 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, 383 .tp_doc = pyrf_sample_event__doc, 384 .tp_members = pyrf_sample_event__members, 385 .tp_repr = (reprfunc)pyrf_sample_event__repr, 386 .tp_getattro = (getattrofunc) pyrf_sample_event__getattro, 387 }; 388 389 static const char pyrf_context_switch_event__doc[] = PyDoc_STR("perf context_switch event object."); 390 391 static PyMemberDef pyrf_context_switch_event__members[] = { 392 sample_members 393 member_def(perf_event_header, type, T_UINT, "event type"), 394 member_def(perf_record_switch, next_prev_pid, T_UINT, "next/prev pid"), 395 member_def(perf_record_switch, next_prev_tid, T_UINT, "next/prev tid"), 396 { .name = NULL, }, 397 }; 398 399 static PyObject *pyrf_context_switch_event__repr(const struct pyrf_event *pevent) 400 { 401 PyObject *ret; 402 char *s; 403 404 if (asprintf(&s, "{ type: context_switch, next_prev_pid: %u, next_prev_tid: %u, switch_out: %u }", 405 pevent->event.context_switch.next_prev_pid, 406 pevent->event.context_switch.next_prev_tid, 407 !!(pevent->event.header.misc & PERF_RECORD_MISC_SWITCH_OUT)) < 0) { 408 ret = PyErr_NoMemory(); 409 } else { 410 ret = PyUnicode_FromString(s); 411 free(s); 412 } 413 return ret; 414 } 415 416 static PyTypeObject pyrf_context_switch_event__type = { 417 PyVarObject_HEAD_INIT(NULL, 0) 418 .tp_name = "perf.context_switch_event", 419 .tp_basicsize = sizeof(struct pyrf_event), 420 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, 421 .tp_doc = pyrf_context_switch_event__doc, 422 .tp_members = pyrf_context_switch_event__members, 423 .tp_repr = (reprfunc)pyrf_context_switch_event__repr, 424 }; 425 426 static int pyrf_event__setup_types(void) 427 { 428 int err; 429 pyrf_mmap_event__type.tp_new = 430 pyrf_task_event__type.tp_new = 431 pyrf_comm_event__type.tp_new = 432 pyrf_lost_event__type.tp_new = 433 pyrf_read_event__type.tp_new = 434 pyrf_sample_event__type.tp_new = 435 pyrf_context_switch_event__type.tp_new = 436 pyrf_throttle_event__type.tp_new = PyType_GenericNew; 437 438 pyrf_sample_event__type.tp_dealloc = (destructor)pyrf_sample_event__delete, 439 440 err = PyType_Ready(&pyrf_mmap_event__type); 441 if (err < 0) 442 goto out; 443 err = PyType_Ready(&pyrf_lost_event__type); 444 if (err < 0) 445 goto out; 446 err = PyType_Ready(&pyrf_task_event__type); 447 if (err < 0) 448 goto out; 449 err = PyType_Ready(&pyrf_comm_event__type); 450 if (err < 0) 451 goto out; 452 err = PyType_Ready(&pyrf_throttle_event__type); 453 if (err < 0) 454 goto out; 455 err = PyType_Ready(&pyrf_read_event__type); 456 if (err < 0) 457 goto out; 458 err = PyType_Ready(&pyrf_sample_event__type); 459 if (err < 0) 460 goto out; 461 err = PyType_Ready(&pyrf_context_switch_event__type); 462 if (err < 0) 463 goto out; 464 out: 465 return err; 466 } 467 468 static PyTypeObject *pyrf_event__type[] = { 469 [PERF_RECORD_MMAP] = &pyrf_mmap_event__type, 470 [PERF_RECORD_LOST] = &pyrf_lost_event__type, 471 [PERF_RECORD_COMM] = &pyrf_comm_event__type, 472 [PERF_RECORD_EXIT] = &pyrf_task_event__type, 473 [PERF_RECORD_THROTTLE] = &pyrf_throttle_event__type, 474 [PERF_RECORD_UNTHROTTLE] = &pyrf_throttle_event__type, 475 [PERF_RECORD_FORK] = &pyrf_task_event__type, 476 [PERF_RECORD_READ] = &pyrf_read_event__type, 477 [PERF_RECORD_SAMPLE] = &pyrf_sample_event__type, 478 [PERF_RECORD_SWITCH] = &pyrf_context_switch_event__type, 479 [PERF_RECORD_SWITCH_CPU_WIDE] = &pyrf_context_switch_event__type, 480 }; 481 482 static PyObject *pyrf_event__new(const union perf_event *event) 483 { 484 struct pyrf_event *pevent; 485 PyTypeObject *ptype; 486 487 if ((event->header.type < PERF_RECORD_MMAP || 488 event->header.type > PERF_RECORD_SAMPLE) && 489 !(event->header.type == PERF_RECORD_SWITCH || 490 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)) { 491 PyErr_Format(PyExc_TypeError, "Unexpected header type %u", 492 event->header.type); 493 return NULL; 494 } 495 496 // FIXME this better be dynamic or we need to parse everything 497 // before calling perf_mmap__consume(), including tracepoint fields. 498 if (sizeof(pevent->event) < event->header.size) { 499 PyErr_Format(PyExc_TypeError, "Unexpected event size: %zd < %u", 500 sizeof(pevent->event), event->header.size); 501 return NULL; 502 } 503 504 ptype = pyrf_event__type[event->header.type]; 505 pevent = PyObject_New(struct pyrf_event, ptype); 506 if (pevent != NULL) 507 memcpy(&pevent->event, event, event->header.size); 508 return (PyObject *)pevent; 509 } 510 511 struct pyrf_cpu_map { 512 PyObject_HEAD 513 514 struct perf_cpu_map *cpus; 515 }; 516 517 static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus, 518 PyObject *args, PyObject *kwargs) 519 { 520 static char *kwlist[] = { "cpustr", NULL }; 521 char *cpustr = NULL; 522 523 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|s", 524 kwlist, &cpustr)) 525 return -1; 526 527 pcpus->cpus = perf_cpu_map__new(cpustr); 528 if (pcpus->cpus == NULL) 529 return -1; 530 return 0; 531 } 532 533 static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus) 534 { 535 perf_cpu_map__put(pcpus->cpus); 536 Py_TYPE(pcpus)->tp_free((PyObject*)pcpus); 537 } 538 539 static Py_ssize_t pyrf_cpu_map__length(PyObject *obj) 540 { 541 struct pyrf_cpu_map *pcpus = (void *)obj; 542 543 return perf_cpu_map__nr(pcpus->cpus); 544 } 545 546 static PyObject *pyrf_cpu_map__item(PyObject *obj, Py_ssize_t i) 547 { 548 struct pyrf_cpu_map *pcpus = (void *)obj; 549 550 if (i >= perf_cpu_map__nr(pcpus->cpus)) { 551 PyErr_SetString(PyExc_IndexError, "Index out of range"); 552 return NULL; 553 } 554 555 return Py_BuildValue("i", perf_cpu_map__cpu(pcpus->cpus, i).cpu); 556 } 557 558 static PySequenceMethods pyrf_cpu_map__sequence_methods = { 559 .sq_length = pyrf_cpu_map__length, 560 .sq_item = pyrf_cpu_map__item, 561 }; 562 563 static const char pyrf_cpu_map__doc[] = PyDoc_STR("cpu map object."); 564 565 static PyTypeObject pyrf_cpu_map__type = { 566 PyVarObject_HEAD_INIT(NULL, 0) 567 .tp_name = "perf.cpu_map", 568 .tp_basicsize = sizeof(struct pyrf_cpu_map), 569 .tp_dealloc = (destructor)pyrf_cpu_map__delete, 570 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, 571 .tp_doc = pyrf_cpu_map__doc, 572 .tp_as_sequence = &pyrf_cpu_map__sequence_methods, 573 .tp_init = (initproc)pyrf_cpu_map__init, 574 }; 575 576 static int pyrf_cpu_map__setup_types(void) 577 { 578 pyrf_cpu_map__type.tp_new = PyType_GenericNew; 579 return PyType_Ready(&pyrf_cpu_map__type); 580 } 581 582 struct pyrf_thread_map { 583 PyObject_HEAD 584 585 struct perf_thread_map *threads; 586 }; 587 588 static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads, 589 PyObject *args, PyObject *kwargs) 590 { 591 static char *kwlist[] = { "pid", "tid", NULL }; 592 int pid = -1, tid = -1; 593 594 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", 595 kwlist, &pid, &tid)) 596 return -1; 597 598 pthreads->threads = thread_map__new(pid, tid); 599 if (pthreads->threads == NULL) 600 return -1; 601 return 0; 602 } 603 604 static void pyrf_thread_map__delete(struct pyrf_thread_map *pthreads) 605 { 606 perf_thread_map__put(pthreads->threads); 607 Py_TYPE(pthreads)->tp_free((PyObject*)pthreads); 608 } 609 610 static Py_ssize_t pyrf_thread_map__length(PyObject *obj) 611 { 612 struct pyrf_thread_map *pthreads = (void *)obj; 613 614 return perf_thread_map__nr(pthreads->threads); 615 } 616 617 static PyObject *pyrf_thread_map__item(PyObject *obj, Py_ssize_t i) 618 { 619 struct pyrf_thread_map *pthreads = (void *)obj; 620 621 if (i >= perf_thread_map__nr(pthreads->threads)) { 622 PyErr_SetString(PyExc_IndexError, "Index out of range"); 623 return NULL; 624 } 625 626 return Py_BuildValue("i", perf_thread_map__pid(pthreads->threads, i)); 627 } 628 629 static PySequenceMethods pyrf_thread_map__sequence_methods = { 630 .sq_length = pyrf_thread_map__length, 631 .sq_item = pyrf_thread_map__item, 632 }; 633 634 static const char pyrf_thread_map__doc[] = PyDoc_STR("thread map object."); 635 636 static PyTypeObject pyrf_thread_map__type = { 637 PyVarObject_HEAD_INIT(NULL, 0) 638 .tp_name = "perf.thread_map", 639 .tp_basicsize = sizeof(struct pyrf_thread_map), 640 .tp_dealloc = (destructor)pyrf_thread_map__delete, 641 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, 642 .tp_doc = pyrf_thread_map__doc, 643 .tp_as_sequence = &pyrf_thread_map__sequence_methods, 644 .tp_init = (initproc)pyrf_thread_map__init, 645 }; 646 647 static int pyrf_thread_map__setup_types(void) 648 { 649 pyrf_thread_map__type.tp_new = PyType_GenericNew; 650 return PyType_Ready(&pyrf_thread_map__type); 651 } 652 653 /** 654 * A python wrapper for perf_pmus that are globally owned by the pmus.c code. 655 */ 656 struct pyrf_pmu { 657 PyObject_HEAD 658 659 struct perf_pmu *pmu; 660 }; 661 662 static void pyrf_pmu__delete(struct pyrf_pmu *ppmu) 663 { 664 Py_TYPE(ppmu)->tp_free((PyObject *)ppmu); 665 } 666 667 static PyObject *pyrf_pmu__name(PyObject *self) 668 { 669 struct pyrf_pmu *ppmu = (void *)self; 670 671 return PyUnicode_FromString(ppmu->pmu->name); 672 } 673 674 static bool add_to_dict(PyObject *dict, const char *key, const char *value) 675 { 676 PyObject *pkey, *pvalue; 677 bool ret; 678 679 if (value == NULL) 680 return true; 681 682 pkey = PyUnicode_FromString(key); 683 pvalue = PyUnicode_FromString(value); 684 685 ret = pkey && pvalue && PyDict_SetItem(dict, pkey, pvalue) == 0; 686 Py_XDECREF(pkey); 687 Py_XDECREF(pvalue); 688 return ret; 689 } 690 691 static int pyrf_pmu__events_cb(void *state, struct pmu_event_info *info) 692 { 693 PyObject *py_list = state; 694 PyObject *dict = PyDict_New(); 695 696 if (!dict) 697 return -ENOMEM; 698 699 if (!add_to_dict(dict, "name", info->name) || 700 !add_to_dict(dict, "alias", info->alias) || 701 !add_to_dict(dict, "scale_unit", info->scale_unit) || 702 !add_to_dict(dict, "desc", info->desc) || 703 !add_to_dict(dict, "long_desc", info->long_desc) || 704 !add_to_dict(dict, "encoding_desc", info->encoding_desc) || 705 !add_to_dict(dict, "topic", info->topic) || 706 !add_to_dict(dict, "event_type_desc", info->event_type_desc) || 707 !add_to_dict(dict, "str", info->str) || 708 !add_to_dict(dict, "deprecated", info->deprecated ? "deprecated" : NULL) || 709 PyList_Append(py_list, dict) != 0) { 710 Py_DECREF(dict); 711 return -ENOMEM; 712 } 713 Py_DECREF(dict); 714 return 0; 715 } 716 717 static PyObject *pyrf_pmu__events(PyObject *self) 718 { 719 struct pyrf_pmu *ppmu = (void *)self; 720 PyObject *py_list = PyList_New(0); 721 int ret; 722 723 if (!py_list) 724 return NULL; 725 726 ret = perf_pmu__for_each_event(ppmu->pmu, 727 /*skip_duplicate_pmus=*/false, 728 py_list, 729 pyrf_pmu__events_cb); 730 if (ret) { 731 Py_DECREF(py_list); 732 errno = -ret; 733 PyErr_SetFromErrno(PyExc_OSError); 734 return NULL; 735 } 736 return py_list; 737 } 738 739 static PyObject *pyrf_pmu__repr(PyObject *self) 740 { 741 struct pyrf_pmu *ppmu = (void *)self; 742 743 return PyUnicode_FromFormat("pmu(%s)", ppmu->pmu->name); 744 } 745 746 static const char pyrf_pmu__doc[] = PyDoc_STR("perf Performance Monitoring Unit (PMU) object."); 747 748 static PyMethodDef pyrf_pmu__methods[] = { 749 { 750 .ml_name = "events", 751 .ml_meth = (PyCFunction)pyrf_pmu__events, 752 .ml_flags = METH_NOARGS, 753 .ml_doc = PyDoc_STR("Returns a sequence of events encoded as a dictionaries.") 754 }, 755 { 756 .ml_name = "name", 757 .ml_meth = (PyCFunction)pyrf_pmu__name, 758 .ml_flags = METH_NOARGS, 759 .ml_doc = PyDoc_STR("Name of the PMU including suffixes.") 760 }, 761 { .ml_name = NULL, } 762 }; 763 764 /** The python type for a perf.pmu. */ 765 static PyTypeObject pyrf_pmu__type = { 766 PyVarObject_HEAD_INIT(NULL, 0) 767 .tp_name = "perf.pmu", 768 .tp_basicsize = sizeof(struct pyrf_pmu), 769 .tp_dealloc = (destructor)pyrf_pmu__delete, 770 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, 771 .tp_doc = pyrf_pmu__doc, 772 .tp_methods = pyrf_pmu__methods, 773 .tp_str = pyrf_pmu__name, 774 .tp_repr = pyrf_pmu__repr, 775 }; 776 777 static int pyrf_pmu__setup_types(void) 778 { 779 pyrf_pmu__type.tp_new = PyType_GenericNew; 780 return PyType_Ready(&pyrf_pmu__type); 781 } 782 783 784 /** A python iterator for pmus that has no equivalent in the C code. */ 785 struct pyrf_pmu_iterator { 786 PyObject_HEAD 787 struct perf_pmu *pmu; 788 }; 789 790 static void pyrf_pmu_iterator__dealloc(struct pyrf_pmu_iterator *self) 791 { 792 Py_TYPE(self)->tp_free((PyObject *) self); 793 } 794 795 static PyObject *pyrf_pmu_iterator__new(PyTypeObject *type, PyObject *args __maybe_unused, 796 PyObject *kwds __maybe_unused) 797 { 798 struct pyrf_pmu_iterator *itr = (void *)type->tp_alloc(type, 0); 799 800 if (itr != NULL) 801 itr->pmu = perf_pmus__scan(/*pmu=*/NULL); 802 803 return (PyObject *) itr; 804 } 805 806 static PyObject *pyrf_pmu_iterator__iter(PyObject *self) 807 { 808 Py_INCREF(self); 809 return self; 810 } 811 812 static PyObject *pyrf_pmu_iterator__iternext(PyObject *self) 813 { 814 struct pyrf_pmu_iterator *itr = (void *)self; 815 struct pyrf_pmu *ppmu; 816 817 if (itr->pmu == NULL) { 818 PyErr_SetNone(PyExc_StopIteration); 819 return NULL; 820 } 821 // Create object to return. 822 ppmu = PyObject_New(struct pyrf_pmu, &pyrf_pmu__type); 823 if (ppmu) { 824 ppmu->pmu = itr->pmu; 825 // Advance iterator. 826 itr->pmu = perf_pmus__scan(itr->pmu); 827 } 828 return (PyObject *)ppmu; 829 } 830 831 /** The python type for the PMU iterator. */ 832 static PyTypeObject pyrf_pmu_iterator__type = { 833 PyVarObject_HEAD_INIT(NULL, 0) 834 .tp_name = "pmus.iterator", 835 .tp_doc = "Iterator for the pmus string sequence.", 836 .tp_basicsize = sizeof(struct pyrf_pmu_iterator), 837 .tp_itemsize = 0, 838 .tp_flags = Py_TPFLAGS_DEFAULT, 839 .tp_new = pyrf_pmu_iterator__new, 840 .tp_dealloc = (destructor) pyrf_pmu_iterator__dealloc, 841 .tp_iter = pyrf_pmu_iterator__iter, 842 .tp_iternext = pyrf_pmu_iterator__iternext, 843 }; 844 845 static int pyrf_pmu_iterator__setup_types(void) 846 { 847 return PyType_Ready(&pyrf_pmu_iterator__type); 848 } 849 850 static PyObject *pyrf__pmus(PyObject *self, PyObject *args) 851 { 852 // Calling the class creates an instance of the iterator. 853 return PyObject_CallObject((PyObject *) &pyrf_pmu_iterator__type, /*args=*/NULL); 854 } 855 856 struct pyrf_counts_values { 857 PyObject_HEAD 858 859 struct perf_counts_values values; 860 }; 861 862 static const char pyrf_counts_values__doc[] = PyDoc_STR("perf counts values object."); 863 864 static void pyrf_counts_values__delete(struct pyrf_counts_values *pcounts_values) 865 { 866 Py_TYPE(pcounts_values)->tp_free((PyObject *)pcounts_values); 867 } 868 869 #define counts_values_member_def(member, ptype, help) \ 870 { #member, ptype, \ 871 offsetof(struct pyrf_counts_values, values.member), \ 872 0, help } 873 874 static PyMemberDef pyrf_counts_values_members[] = { 875 counts_values_member_def(val, T_ULONG, "Value of event"), 876 counts_values_member_def(ena, T_ULONG, "Time for which enabled"), 877 counts_values_member_def(run, T_ULONG, "Time for which running"), 878 counts_values_member_def(id, T_ULONG, "Unique ID for an event"), 879 counts_values_member_def(lost, T_ULONG, "Num of lost samples"), 880 { .name = NULL, }, 881 }; 882 883 static PyObject *pyrf_counts_values_get_values(struct pyrf_counts_values *self, void *closure) 884 { 885 PyObject *vals = PyList_New(5); 886 887 if (!vals) 888 return NULL; 889 for (int i = 0; i < 5; i++) 890 PyList_SetItem(vals, i, PyLong_FromLong(self->values.values[i])); 891 892 return vals; 893 } 894 895 static int pyrf_counts_values_set_values(struct pyrf_counts_values *self, PyObject *list, 896 void *closure) 897 { 898 Py_ssize_t size; 899 PyObject *item = NULL; 900 901 if (!PyList_Check(list)) { 902 PyErr_SetString(PyExc_TypeError, "Value assigned must be a list"); 903 return -1; 904 } 905 906 size = PyList_Size(list); 907 for (Py_ssize_t i = 0; i < size; i++) { 908 item = PyList_GetItem(list, i); 909 if (!PyLong_Check(item)) { 910 PyErr_SetString(PyExc_TypeError, "List members should be numbers"); 911 return -1; 912 } 913 self->values.values[i] = PyLong_AsLong(item); 914 } 915 916 return 0; 917 } 918 919 static PyGetSetDef pyrf_counts_values_getset[] = { 920 {"values", (getter)pyrf_counts_values_get_values, (setter)pyrf_counts_values_set_values, 921 "Name field", NULL}, 922 { .name = NULL, }, 923 }; 924 925 static PyTypeObject pyrf_counts_values__type = { 926 PyVarObject_HEAD_INIT(NULL, 0) 927 .tp_name = "perf.counts_values", 928 .tp_basicsize = sizeof(struct pyrf_counts_values), 929 .tp_dealloc = (destructor)pyrf_counts_values__delete, 930 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, 931 .tp_doc = pyrf_counts_values__doc, 932 .tp_members = pyrf_counts_values_members, 933 .tp_getset = pyrf_counts_values_getset, 934 }; 935 936 static int pyrf_counts_values__setup_types(void) 937 { 938 pyrf_counts_values__type.tp_new = PyType_GenericNew; 939 return PyType_Ready(&pyrf_counts_values__type); 940 } 941 942 struct pyrf_evsel { 943 PyObject_HEAD 944 945 struct evsel evsel; 946 }; 947 948 static int pyrf_evsel__init(struct pyrf_evsel *pevsel, 949 PyObject *args, PyObject *kwargs) 950 { 951 struct perf_event_attr attr = { 952 .type = PERF_TYPE_HARDWARE, 953 .config = PERF_COUNT_HW_CPU_CYCLES, 954 .sample_type = PERF_SAMPLE_PERIOD | PERF_SAMPLE_TID, 955 }; 956 static char *kwlist[] = { 957 "type", 958 "config", 959 "sample_freq", 960 "sample_period", 961 "sample_type", 962 "read_format", 963 "disabled", 964 "inherit", 965 "pinned", 966 "exclusive", 967 "exclude_user", 968 "exclude_kernel", 969 "exclude_hv", 970 "exclude_idle", 971 "mmap", 972 "context_switch", 973 "comm", 974 "freq", 975 "inherit_stat", 976 "enable_on_exec", 977 "task", 978 "watermark", 979 "precise_ip", 980 "mmap_data", 981 "sample_id_all", 982 "wakeup_events", 983 "bp_type", 984 "bp_addr", 985 "bp_len", 986 NULL 987 }; 988 u64 sample_period = 0; 989 u32 disabled = 0, 990 inherit = 0, 991 pinned = 0, 992 exclusive = 0, 993 exclude_user = 0, 994 exclude_kernel = 0, 995 exclude_hv = 0, 996 exclude_idle = 0, 997 mmap = 0, 998 context_switch = 0, 999 comm = 0, 1000 freq = 1, 1001 inherit_stat = 0, 1002 enable_on_exec = 0, 1003 task = 0, 1004 watermark = 0, 1005 precise_ip = 0, 1006 mmap_data = 0, 1007 sample_id_all = 1; 1008 int idx = 0; 1009 1010 if (!PyArg_ParseTupleAndKeywords(args, kwargs, 1011 "|iKiKKiiiiiiiiiiiiiiiiiiiiiiKK", kwlist, 1012 &attr.type, &attr.config, &attr.sample_freq, 1013 &sample_period, &attr.sample_type, 1014 &attr.read_format, &disabled, &inherit, 1015 &pinned, &exclusive, &exclude_user, 1016 &exclude_kernel, &exclude_hv, &exclude_idle, 1017 &mmap, &context_switch, &comm, &freq, &inherit_stat, 1018 &enable_on_exec, &task, &watermark, 1019 &precise_ip, &mmap_data, &sample_id_all, 1020 &attr.wakeup_events, &attr.bp_type, 1021 &attr.bp_addr, &attr.bp_len, &idx)) 1022 return -1; 1023 1024 /* union... */ 1025 if (sample_period != 0) { 1026 if (attr.sample_freq != 0) 1027 return -1; /* FIXME: throw right exception */ 1028 attr.sample_period = sample_period; 1029 } 1030 1031 /* Bitfields */ 1032 attr.disabled = disabled; 1033 attr.inherit = inherit; 1034 attr.pinned = pinned; 1035 attr.exclusive = exclusive; 1036 attr.exclude_user = exclude_user; 1037 attr.exclude_kernel = exclude_kernel; 1038 attr.exclude_hv = exclude_hv; 1039 attr.exclude_idle = exclude_idle; 1040 attr.mmap = mmap; 1041 attr.context_switch = context_switch; 1042 attr.comm = comm; 1043 attr.freq = freq; 1044 attr.inherit_stat = inherit_stat; 1045 attr.enable_on_exec = enable_on_exec; 1046 attr.task = task; 1047 attr.watermark = watermark; 1048 attr.precise_ip = precise_ip; 1049 attr.mmap_data = mmap_data; 1050 attr.sample_id_all = sample_id_all; 1051 attr.size = sizeof(attr); 1052 1053 evsel__init(&pevsel->evsel, &attr, idx); 1054 return 0; 1055 } 1056 1057 static void pyrf_evsel__delete(struct pyrf_evsel *pevsel) 1058 { 1059 evsel__exit(&pevsel->evsel); 1060 Py_TYPE(pevsel)->tp_free((PyObject*)pevsel); 1061 } 1062 1063 static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel, 1064 PyObject *args, PyObject *kwargs) 1065 { 1066 struct evsel *evsel = &pevsel->evsel; 1067 struct perf_cpu_map *cpus = NULL; 1068 struct perf_thread_map *threads = NULL; 1069 PyObject *pcpus = NULL, *pthreads = NULL; 1070 int group = 0, inherit = 0; 1071 static char *kwlist[] = { "cpus", "threads", "group", "inherit", NULL }; 1072 1073 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist, 1074 &pcpus, &pthreads, &group, &inherit)) 1075 return NULL; 1076 1077 if (pthreads != NULL) 1078 threads = ((struct pyrf_thread_map *)pthreads)->threads; 1079 1080 if (pcpus != NULL) 1081 cpus = ((struct pyrf_cpu_map *)pcpus)->cpus; 1082 1083 evsel->core.attr.inherit = inherit; 1084 /* 1085 * This will group just the fds for this single evsel, to group 1086 * multiple events, use evlist.open(). 1087 */ 1088 if (evsel__open(evsel, cpus, threads) < 0) { 1089 PyErr_SetFromErrno(PyExc_OSError); 1090 return NULL; 1091 } 1092 1093 Py_INCREF(Py_None); 1094 return Py_None; 1095 } 1096 1097 static PyObject *pyrf_evsel__cpus(struct pyrf_evsel *pevsel) 1098 { 1099 struct pyrf_cpu_map *pcpu_map = PyObject_New(struct pyrf_cpu_map, &pyrf_cpu_map__type); 1100 1101 if (pcpu_map) 1102 pcpu_map->cpus = perf_cpu_map__get(pevsel->evsel.core.cpus); 1103 1104 return (PyObject *)pcpu_map; 1105 } 1106 1107 static PyObject *pyrf_evsel__threads(struct pyrf_evsel *pevsel) 1108 { 1109 struct pyrf_thread_map *pthread_map = 1110 PyObject_New(struct pyrf_thread_map, &pyrf_thread_map__type); 1111 1112 if (pthread_map) 1113 pthread_map->threads = perf_thread_map__get(pevsel->evsel.core.threads); 1114 1115 return (PyObject *)pthread_map; 1116 } 1117 1118 /* 1119 * Ensure evsel's counts and prev_raw_counts are allocated, the latter 1120 * used by tool PMUs to compute the cumulative count as expected by 1121 * stat's process_counter_values. 1122 */ 1123 static int evsel__ensure_counts(struct evsel *evsel) 1124 { 1125 int nthreads, ncpus; 1126 1127 if (evsel->counts != NULL) 1128 return 0; 1129 1130 nthreads = perf_thread_map__nr(evsel->core.threads); 1131 ncpus = perf_cpu_map__nr(evsel->core.cpus); 1132 1133 evsel->counts = perf_counts__new(ncpus, nthreads); 1134 if (evsel->counts == NULL) 1135 return -ENOMEM; 1136 1137 evsel->prev_raw_counts = perf_counts__new(ncpus, nthreads); 1138 if (evsel->prev_raw_counts == NULL) 1139 return -ENOMEM; 1140 1141 return 0; 1142 } 1143 1144 static PyObject *pyrf_evsel__read(struct pyrf_evsel *pevsel, 1145 PyObject *args, PyObject *kwargs) 1146 { 1147 struct evsel *evsel = &pevsel->evsel; 1148 int cpu = 0, cpu_idx, thread = 0, thread_idx; 1149 struct perf_counts_values *old_count, *new_count; 1150 struct pyrf_counts_values *count_values = PyObject_New(struct pyrf_counts_values, 1151 &pyrf_counts_values__type); 1152 1153 if (!count_values) 1154 return NULL; 1155 1156 if (!PyArg_ParseTuple(args, "ii", &cpu, &thread)) 1157 return NULL; 1158 1159 cpu_idx = perf_cpu_map__idx(evsel->core.cpus, (struct perf_cpu){.cpu = cpu}); 1160 if (cpu_idx < 0) { 1161 PyErr_Format(PyExc_TypeError, "CPU %d is not part of evsel's CPUs", cpu); 1162 return NULL; 1163 } 1164 thread_idx = perf_thread_map__idx(evsel->core.threads, thread); 1165 if (thread_idx < 0) { 1166 PyErr_Format(PyExc_TypeError, "Thread %d is not part of evsel's threads", 1167 thread); 1168 return NULL; 1169 } 1170 1171 if (evsel__ensure_counts(evsel)) 1172 return PyErr_NoMemory(); 1173 1174 /* Set up pointers to the old and newly read counter values. */ 1175 old_count = perf_counts(evsel->prev_raw_counts, cpu_idx, thread_idx); 1176 new_count = perf_counts(evsel->counts, cpu_idx, thread_idx); 1177 /* Update the value in evsel->counts. */ 1178 evsel__read_counter(evsel, cpu_idx, thread_idx); 1179 /* Copy the value and turn it into the delta from old_count. */ 1180 count_values->values = *new_count; 1181 count_values->values.val -= old_count->val; 1182 count_values->values.ena -= old_count->ena; 1183 count_values->values.run -= old_count->run; 1184 /* Save the new count over the old_count for the next read. */ 1185 *old_count = *new_count; 1186 return (PyObject *)count_values; 1187 } 1188 1189 static PyObject *pyrf_evsel__str(PyObject *self) 1190 { 1191 struct pyrf_evsel *pevsel = (void *)self; 1192 struct evsel *evsel = &pevsel->evsel; 1193 1194 return PyUnicode_FromFormat("evsel(%s/%s/)", evsel__pmu_name(evsel), evsel__name(evsel)); 1195 } 1196 1197 static PyMethodDef pyrf_evsel__methods[] = { 1198 { 1199 .ml_name = "open", 1200 .ml_meth = (PyCFunction)pyrf_evsel__open, 1201 .ml_flags = METH_VARARGS | METH_KEYWORDS, 1202 .ml_doc = PyDoc_STR("open the event selector file descriptor table.") 1203 }, 1204 { 1205 .ml_name = "cpus", 1206 .ml_meth = (PyCFunction)pyrf_evsel__cpus, 1207 .ml_flags = METH_NOARGS, 1208 .ml_doc = PyDoc_STR("CPUs the event is to be used with.") 1209 }, 1210 { 1211 .ml_name = "threads", 1212 .ml_meth = (PyCFunction)pyrf_evsel__threads, 1213 .ml_flags = METH_NOARGS, 1214 .ml_doc = PyDoc_STR("threads the event is to be used with.") 1215 }, 1216 { 1217 .ml_name = "read", 1218 .ml_meth = (PyCFunction)pyrf_evsel__read, 1219 .ml_flags = METH_VARARGS | METH_KEYWORDS, 1220 .ml_doc = PyDoc_STR("read counters") 1221 }, 1222 { .ml_name = NULL, } 1223 }; 1224 1225 #define evsel_member_def(member, ptype, help) \ 1226 { #member, ptype, \ 1227 offsetof(struct pyrf_evsel, evsel.member), \ 1228 0, help } 1229 1230 #define evsel_attr_member_def(member, ptype, help) \ 1231 { #member, ptype, \ 1232 offsetof(struct pyrf_evsel, evsel.core.attr.member), \ 1233 0, help } 1234 1235 static PyMemberDef pyrf_evsel__members[] = { 1236 evsel_member_def(tracking, T_BOOL, "tracking event."), 1237 evsel_attr_member_def(type, T_UINT, "attribute type."), 1238 evsel_attr_member_def(size, T_UINT, "attribute size."), 1239 evsel_attr_member_def(config, T_ULONGLONG, "attribute config."), 1240 evsel_attr_member_def(sample_period, T_ULONGLONG, "attribute sample_period."), 1241 evsel_attr_member_def(sample_type, T_ULONGLONG, "attribute sample_type."), 1242 evsel_attr_member_def(read_format, T_ULONGLONG, "attribute read_format."), 1243 evsel_attr_member_def(wakeup_events, T_UINT, "attribute wakeup_events."), 1244 { .name = NULL, }, 1245 }; 1246 1247 static const char pyrf_evsel__doc[] = PyDoc_STR("perf event selector list object."); 1248 1249 static PyTypeObject pyrf_evsel__type = { 1250 PyVarObject_HEAD_INIT(NULL, 0) 1251 .tp_name = "perf.evsel", 1252 .tp_basicsize = sizeof(struct pyrf_evsel), 1253 .tp_dealloc = (destructor)pyrf_evsel__delete, 1254 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, 1255 .tp_doc = pyrf_evsel__doc, 1256 .tp_members = pyrf_evsel__members, 1257 .tp_methods = pyrf_evsel__methods, 1258 .tp_init = (initproc)pyrf_evsel__init, 1259 .tp_str = pyrf_evsel__str, 1260 .tp_repr = pyrf_evsel__str, 1261 }; 1262 1263 static int pyrf_evsel__setup_types(void) 1264 { 1265 pyrf_evsel__type.tp_new = PyType_GenericNew; 1266 return PyType_Ready(&pyrf_evsel__type); 1267 } 1268 1269 struct pyrf_evlist { 1270 PyObject_HEAD 1271 1272 struct evlist evlist; 1273 }; 1274 1275 static int pyrf_evlist__init(struct pyrf_evlist *pevlist, 1276 PyObject *args, PyObject *kwargs __maybe_unused) 1277 { 1278 PyObject *pcpus = NULL, *pthreads = NULL; 1279 struct perf_cpu_map *cpus; 1280 struct perf_thread_map *threads; 1281 1282 if (!PyArg_ParseTuple(args, "OO", &pcpus, &pthreads)) 1283 return -1; 1284 1285 threads = ((struct pyrf_thread_map *)pthreads)->threads; 1286 cpus = ((struct pyrf_cpu_map *)pcpus)->cpus; 1287 evlist__init(&pevlist->evlist, cpus, threads); 1288 return 0; 1289 } 1290 1291 static void pyrf_evlist__delete(struct pyrf_evlist *pevlist) 1292 { 1293 evlist__exit(&pevlist->evlist); 1294 Py_TYPE(pevlist)->tp_free((PyObject*)pevlist); 1295 } 1296 1297 static PyObject *pyrf_evlist__all_cpus(struct pyrf_evlist *pevlist) 1298 { 1299 struct pyrf_cpu_map *pcpu_map = PyObject_New(struct pyrf_cpu_map, &pyrf_cpu_map__type); 1300 1301 if (pcpu_map) 1302 pcpu_map->cpus = perf_cpu_map__get(pevlist->evlist.core.all_cpus); 1303 1304 return (PyObject *)pcpu_map; 1305 } 1306 1307 static PyObject *pyrf_evlist__metrics(struct pyrf_evlist *pevlist) 1308 { 1309 PyObject *list = PyList_New(/*len=*/0); 1310 struct rb_node *node; 1311 1312 if (!list) 1313 return NULL; 1314 1315 for (node = rb_first_cached(&pevlist->evlist.metric_events.entries); node; 1316 node = rb_next(node)) { 1317 struct metric_event *me = container_of(node, struct metric_event, nd); 1318 struct list_head *pos; 1319 1320 list_for_each(pos, &me->head) { 1321 struct metric_expr *expr = container_of(pos, struct metric_expr, nd); 1322 PyObject *str = PyUnicode_FromString(expr->metric_name); 1323 1324 if (!str || PyList_Append(list, str) != 0) { 1325 Py_DECREF(list); 1326 return NULL; 1327 } 1328 Py_DECREF(str); 1329 } 1330 } 1331 return list; 1332 } 1333 1334 static int prepare_metric(const struct metric_expr *mexp, 1335 const struct evsel *evsel, 1336 struct expr_parse_ctx *pctx, 1337 int cpu_idx, int thread_idx) 1338 { 1339 struct evsel * const *metric_events = mexp->metric_events; 1340 struct metric_ref *metric_refs = mexp->metric_refs; 1341 1342 for (int i = 0; metric_events[i]; i++) { 1343 struct evsel *cur = metric_events[i]; 1344 double val, ena, run; 1345 int ret, source_count = 0; 1346 struct perf_counts_values *old_count, *new_count; 1347 char *n = strdup(evsel__metric_id(cur)); 1348 1349 if (!n) 1350 return -ENOMEM; 1351 1352 /* 1353 * If there are multiple uncore PMUs and we're not reading the 1354 * leader's stats, determine the stats for the appropriate 1355 * uncore PMU. 1356 */ 1357 if (evsel && evsel->metric_leader && 1358 evsel->pmu != evsel->metric_leader->pmu && 1359 cur->pmu == evsel->metric_leader->pmu) { 1360 struct evsel *pos; 1361 1362 evlist__for_each_entry(evsel->evlist, pos) { 1363 if (pos->pmu != evsel->pmu) 1364 continue; 1365 if (pos->metric_leader != cur) 1366 continue; 1367 cur = pos; 1368 source_count = 1; 1369 break; 1370 } 1371 } 1372 1373 if (source_count == 0) 1374 source_count = evsel__source_count(cur); 1375 1376 ret = evsel__ensure_counts(cur); 1377 if (ret) 1378 return ret; 1379 1380 /* Set up pointers to the old and newly read counter values. */ 1381 old_count = perf_counts(cur->prev_raw_counts, cpu_idx, thread_idx); 1382 new_count = perf_counts(cur->counts, cpu_idx, thread_idx); 1383 /* Update the value in cur->counts. */ 1384 evsel__read_counter(cur, cpu_idx, thread_idx); 1385 1386 val = new_count->val - old_count->val; 1387 ena = new_count->ena - old_count->ena; 1388 run = new_count->run - old_count->run; 1389 1390 if (ena != run && run != 0) 1391 val = val * ena / run; 1392 ret = expr__add_id_val_source_count(pctx, n, val, source_count); 1393 if (ret) 1394 return ret; 1395 } 1396 1397 for (int i = 0; metric_refs && metric_refs[i].metric_name; i++) { 1398 int ret = expr__add_ref(pctx, &metric_refs[i]); 1399 1400 if (ret) 1401 return ret; 1402 } 1403 1404 return 0; 1405 } 1406 1407 static PyObject *pyrf_evlist__compute_metric(struct pyrf_evlist *pevlist, 1408 PyObject *args, PyObject *kwargs) 1409 { 1410 int ret, cpu = 0, cpu_idx = 0, thread = 0, thread_idx = 0; 1411 const char *metric; 1412 struct rb_node *node; 1413 struct metric_expr *mexp = NULL; 1414 struct expr_parse_ctx *pctx; 1415 double result = 0; 1416 struct evsel *metric_evsel = NULL; 1417 1418 if (!PyArg_ParseTuple(args, "sii", &metric, &cpu, &thread)) 1419 return NULL; 1420 1421 for (node = rb_first_cached(&pevlist->evlist.metric_events.entries); 1422 mexp == NULL && node; 1423 node = rb_next(node)) { 1424 struct metric_event *me = container_of(node, struct metric_event, nd); 1425 struct list_head *pos; 1426 1427 list_for_each(pos, &me->head) { 1428 struct metric_expr *e = container_of(pos, struct metric_expr, nd); 1429 struct evsel *pos2; 1430 1431 if (strcmp(e->metric_name, metric)) 1432 continue; 1433 1434 if (e->metric_events[0] == NULL) 1435 continue; 1436 1437 evlist__for_each_entry(&pevlist->evlist, pos2) { 1438 if (pos2->metric_leader != e->metric_events[0]) 1439 continue; 1440 cpu_idx = perf_cpu_map__idx(pos2->core.cpus, 1441 (struct perf_cpu){.cpu = cpu}); 1442 if (cpu_idx < 0) 1443 continue; 1444 1445 thread_idx = perf_thread_map__idx(pos2->core.threads, thread); 1446 if (thread_idx < 0) 1447 continue; 1448 metric_evsel = pos2; 1449 mexp = e; 1450 goto done; 1451 } 1452 } 1453 } 1454 done: 1455 if (!mexp) { 1456 PyErr_Format(PyExc_TypeError, "Unknown metric '%s' for CPU '%d' and thread '%d'", 1457 metric, cpu, thread); 1458 return NULL; 1459 } 1460 1461 pctx = expr__ctx_new(); 1462 if (!pctx) 1463 return PyErr_NoMemory(); 1464 1465 ret = prepare_metric(mexp, metric_evsel, pctx, cpu_idx, thread_idx); 1466 if (ret) { 1467 expr__ctx_free(pctx); 1468 errno = -ret; 1469 PyErr_SetFromErrno(PyExc_OSError); 1470 return NULL; 1471 } 1472 if (expr__parse(&result, pctx, mexp->metric_expr)) 1473 result = 0.0; 1474 1475 expr__ctx_free(pctx); 1476 return PyFloat_FromDouble(result); 1477 } 1478 1479 static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist, 1480 PyObject *args, PyObject *kwargs) 1481 { 1482 struct evlist *evlist = &pevlist->evlist; 1483 static char *kwlist[] = { "pages", "overwrite", NULL }; 1484 int pages = 128, overwrite = false; 1485 1486 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", kwlist, 1487 &pages, &overwrite)) 1488 return NULL; 1489 1490 if (evlist__mmap(evlist, pages) < 0) { 1491 PyErr_SetFromErrno(PyExc_OSError); 1492 return NULL; 1493 } 1494 1495 Py_INCREF(Py_None); 1496 return Py_None; 1497 } 1498 1499 static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist, 1500 PyObject *args, PyObject *kwargs) 1501 { 1502 struct evlist *evlist = &pevlist->evlist; 1503 static char *kwlist[] = { "timeout", NULL }; 1504 int timeout = -1, n; 1505 1506 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout)) 1507 return NULL; 1508 1509 n = evlist__poll(evlist, timeout); 1510 if (n < 0) { 1511 PyErr_SetFromErrno(PyExc_OSError); 1512 return NULL; 1513 } 1514 1515 return Py_BuildValue("i", n); 1516 } 1517 1518 static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist, 1519 PyObject *args __maybe_unused, 1520 PyObject *kwargs __maybe_unused) 1521 { 1522 struct evlist *evlist = &pevlist->evlist; 1523 PyObject *list = PyList_New(0); 1524 int i; 1525 1526 for (i = 0; i < evlist->core.pollfd.nr; ++i) { 1527 PyObject *file; 1528 file = PyFile_FromFd(evlist->core.pollfd.entries[i].fd, "perf", "r", -1, 1529 NULL, NULL, NULL, 0); 1530 if (file == NULL) 1531 goto free_list; 1532 1533 if (PyList_Append(list, file) != 0) { 1534 Py_DECREF(file); 1535 goto free_list; 1536 } 1537 1538 Py_DECREF(file); 1539 } 1540 1541 return list; 1542 free_list: 1543 return PyErr_NoMemory(); 1544 } 1545 1546 1547 static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist, 1548 PyObject *args, 1549 PyObject *kwargs __maybe_unused) 1550 { 1551 struct evlist *evlist = &pevlist->evlist; 1552 PyObject *pevsel; 1553 struct evsel *evsel; 1554 1555 if (!PyArg_ParseTuple(args, "O", &pevsel)) 1556 return NULL; 1557 1558 Py_INCREF(pevsel); 1559 evsel = &((struct pyrf_evsel *)pevsel)->evsel; 1560 evsel->core.idx = evlist->core.nr_entries; 1561 evlist__add(evlist, evsel); 1562 1563 return Py_BuildValue("i", evlist->core.nr_entries); 1564 } 1565 1566 static struct mmap *get_md(struct evlist *evlist, int cpu) 1567 { 1568 int i; 1569 1570 for (i = 0; i < evlist->core.nr_mmaps; i++) { 1571 struct mmap *md = &evlist->mmap[i]; 1572 1573 if (md->core.cpu.cpu == cpu) 1574 return md; 1575 } 1576 1577 return NULL; 1578 } 1579 1580 static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist, 1581 PyObject *args, PyObject *kwargs) 1582 { 1583 struct evlist *evlist = &pevlist->evlist; 1584 union perf_event *event; 1585 int sample_id_all = 1, cpu; 1586 static char *kwlist[] = { "cpu", "sample_id_all", NULL }; 1587 struct mmap *md; 1588 int err; 1589 1590 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist, 1591 &cpu, &sample_id_all)) 1592 return NULL; 1593 1594 md = get_md(evlist, cpu); 1595 if (!md) { 1596 PyErr_Format(PyExc_TypeError, "Unknown CPU '%d'", cpu); 1597 return NULL; 1598 } 1599 1600 if (perf_mmap__read_init(&md->core) < 0) 1601 goto end; 1602 1603 event = perf_mmap__read_event(&md->core); 1604 if (event != NULL) { 1605 PyObject *pyevent = pyrf_event__new(event); 1606 struct pyrf_event *pevent = (struct pyrf_event *)pyevent; 1607 struct evsel *evsel; 1608 1609 if (pyevent == NULL) 1610 return PyErr_NoMemory(); 1611 1612 evsel = evlist__event2evsel(evlist, event); 1613 if (!evsel) { 1614 Py_DECREF(pyevent); 1615 Py_INCREF(Py_None); 1616 return Py_None; 1617 } 1618 1619 pevent->evsel = evsel; 1620 1621 perf_mmap__consume(&md->core); 1622 1623 err = evsel__parse_sample(evsel, &pevent->event, &pevent->sample); 1624 if (err) { 1625 Py_DECREF(pyevent); 1626 return PyErr_Format(PyExc_OSError, 1627 "perf: can't parse sample, err=%d", err); 1628 } 1629 1630 return pyevent; 1631 } 1632 end: 1633 Py_INCREF(Py_None); 1634 return Py_None; 1635 } 1636 1637 static PyObject *pyrf_evlist__open(struct pyrf_evlist *pevlist, 1638 PyObject *args, PyObject *kwargs) 1639 { 1640 struct evlist *evlist = &pevlist->evlist; 1641 1642 if (evlist__open(evlist) < 0) { 1643 PyErr_SetFromErrno(PyExc_OSError); 1644 return NULL; 1645 } 1646 1647 Py_INCREF(Py_None); 1648 return Py_None; 1649 } 1650 1651 static PyObject *pyrf_evlist__close(struct pyrf_evlist *pevlist) 1652 { 1653 struct evlist *evlist = &pevlist->evlist; 1654 1655 evlist__close(evlist); 1656 1657 Py_INCREF(Py_None); 1658 return Py_None; 1659 } 1660 1661 static PyObject *pyrf_evlist__config(struct pyrf_evlist *pevlist) 1662 { 1663 struct record_opts opts = { 1664 .sample_time = true, 1665 .mmap_pages = UINT_MAX, 1666 .user_freq = UINT_MAX, 1667 .user_interval = ULLONG_MAX, 1668 .freq = 4000, 1669 .target = { 1670 .uses_mmap = true, 1671 .default_per_cpu = true, 1672 }, 1673 .nr_threads_synthesize = 1, 1674 .ctl_fd = -1, 1675 .ctl_fd_ack = -1, 1676 .no_buffering = true, 1677 .no_inherit = true, 1678 }; 1679 struct evlist *evlist = &pevlist->evlist; 1680 1681 evlist__config(evlist, &opts, &callchain_param); 1682 Py_INCREF(Py_None); 1683 return Py_None; 1684 } 1685 1686 static PyObject *pyrf_evlist__disable(struct pyrf_evlist *pevlist) 1687 { 1688 evlist__disable(&pevlist->evlist); 1689 Py_INCREF(Py_None); 1690 return Py_None; 1691 } 1692 1693 static PyObject *pyrf_evlist__enable(struct pyrf_evlist *pevlist) 1694 { 1695 evlist__enable(&pevlist->evlist); 1696 Py_INCREF(Py_None); 1697 return Py_None; 1698 } 1699 1700 static PyMethodDef pyrf_evlist__methods[] = { 1701 { 1702 .ml_name = "all_cpus", 1703 .ml_meth = (PyCFunction)pyrf_evlist__all_cpus, 1704 .ml_flags = METH_NOARGS, 1705 .ml_doc = PyDoc_STR("CPU map union of all evsel CPU maps.") 1706 }, 1707 { 1708 .ml_name = "metrics", 1709 .ml_meth = (PyCFunction)pyrf_evlist__metrics, 1710 .ml_flags = METH_NOARGS, 1711 .ml_doc = PyDoc_STR("List of metric names within the evlist.") 1712 }, 1713 { 1714 .ml_name = "compute_metric", 1715 .ml_meth = (PyCFunction)pyrf_evlist__compute_metric, 1716 .ml_flags = METH_VARARGS | METH_KEYWORDS, 1717 .ml_doc = PyDoc_STR("compute metric for given name, cpu and thread") 1718 }, 1719 { 1720 .ml_name = "mmap", 1721 .ml_meth = (PyCFunction)pyrf_evlist__mmap, 1722 .ml_flags = METH_VARARGS | METH_KEYWORDS, 1723 .ml_doc = PyDoc_STR("mmap the file descriptor table.") 1724 }, 1725 { 1726 .ml_name = "open", 1727 .ml_meth = (PyCFunction)pyrf_evlist__open, 1728 .ml_flags = METH_VARARGS | METH_KEYWORDS, 1729 .ml_doc = PyDoc_STR("open the file descriptors.") 1730 }, 1731 { 1732 .ml_name = "close", 1733 .ml_meth = (PyCFunction)pyrf_evlist__close, 1734 .ml_flags = METH_NOARGS, 1735 .ml_doc = PyDoc_STR("close the file descriptors.") 1736 }, 1737 { 1738 .ml_name = "poll", 1739 .ml_meth = (PyCFunction)pyrf_evlist__poll, 1740 .ml_flags = METH_VARARGS | METH_KEYWORDS, 1741 .ml_doc = PyDoc_STR("poll the file descriptor table.") 1742 }, 1743 { 1744 .ml_name = "get_pollfd", 1745 .ml_meth = (PyCFunction)pyrf_evlist__get_pollfd, 1746 .ml_flags = METH_VARARGS | METH_KEYWORDS, 1747 .ml_doc = PyDoc_STR("get the poll file descriptor table.") 1748 }, 1749 { 1750 .ml_name = "add", 1751 .ml_meth = (PyCFunction)pyrf_evlist__add, 1752 .ml_flags = METH_VARARGS | METH_KEYWORDS, 1753 .ml_doc = PyDoc_STR("adds an event selector to the list.") 1754 }, 1755 { 1756 .ml_name = "read_on_cpu", 1757 .ml_meth = (PyCFunction)pyrf_evlist__read_on_cpu, 1758 .ml_flags = METH_VARARGS | METH_KEYWORDS, 1759 .ml_doc = PyDoc_STR("reads an event.") 1760 }, 1761 { 1762 .ml_name = "config", 1763 .ml_meth = (PyCFunction)pyrf_evlist__config, 1764 .ml_flags = METH_NOARGS, 1765 .ml_doc = PyDoc_STR("Apply default record options to the evlist.") 1766 }, 1767 { 1768 .ml_name = "disable", 1769 .ml_meth = (PyCFunction)pyrf_evlist__disable, 1770 .ml_flags = METH_NOARGS, 1771 .ml_doc = PyDoc_STR("Disable the evsels in the evlist.") 1772 }, 1773 { 1774 .ml_name = "enable", 1775 .ml_meth = (PyCFunction)pyrf_evlist__enable, 1776 .ml_flags = METH_NOARGS, 1777 .ml_doc = PyDoc_STR("Enable the evsels in the evlist.") 1778 }, 1779 { .ml_name = NULL, } 1780 }; 1781 1782 static Py_ssize_t pyrf_evlist__length(PyObject *obj) 1783 { 1784 struct pyrf_evlist *pevlist = (void *)obj; 1785 1786 return pevlist->evlist.core.nr_entries; 1787 } 1788 1789 static PyObject *pyrf_evlist__item(PyObject *obj, Py_ssize_t i) 1790 { 1791 struct pyrf_evlist *pevlist = (void *)obj; 1792 struct evsel *pos; 1793 1794 if (i >= pevlist->evlist.core.nr_entries) { 1795 PyErr_SetString(PyExc_IndexError, "Index out of range"); 1796 return NULL; 1797 } 1798 1799 evlist__for_each_entry(&pevlist->evlist, pos) { 1800 if (i-- == 0) 1801 break; 1802 } 1803 1804 return Py_BuildValue("O", container_of(pos, struct pyrf_evsel, evsel)); 1805 } 1806 1807 static PyObject *pyrf_evlist__str(PyObject *self) 1808 { 1809 struct pyrf_evlist *pevlist = (void *)self; 1810 struct evsel *pos; 1811 struct strbuf sb = STRBUF_INIT; 1812 bool first = true; 1813 PyObject *result; 1814 1815 strbuf_addstr(&sb, "evlist(["); 1816 evlist__for_each_entry(&pevlist->evlist, pos) { 1817 if (!first) 1818 strbuf_addch(&sb, ','); 1819 if (!pos->pmu) 1820 strbuf_addstr(&sb, evsel__name(pos)); 1821 else 1822 strbuf_addf(&sb, "%s/%s/", pos->pmu->name, evsel__name(pos)); 1823 first = false; 1824 } 1825 strbuf_addstr(&sb, "])"); 1826 result = PyUnicode_FromString(sb.buf); 1827 strbuf_release(&sb); 1828 return result; 1829 } 1830 1831 static PySequenceMethods pyrf_evlist__sequence_methods = { 1832 .sq_length = pyrf_evlist__length, 1833 .sq_item = pyrf_evlist__item, 1834 }; 1835 1836 static const char pyrf_evlist__doc[] = PyDoc_STR("perf event selector list object."); 1837 1838 static PyTypeObject pyrf_evlist__type = { 1839 PyVarObject_HEAD_INIT(NULL, 0) 1840 .tp_name = "perf.evlist", 1841 .tp_basicsize = sizeof(struct pyrf_evlist), 1842 .tp_dealloc = (destructor)pyrf_evlist__delete, 1843 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, 1844 .tp_as_sequence = &pyrf_evlist__sequence_methods, 1845 .tp_doc = pyrf_evlist__doc, 1846 .tp_methods = pyrf_evlist__methods, 1847 .tp_init = (initproc)pyrf_evlist__init, 1848 .tp_repr = pyrf_evlist__str, 1849 .tp_str = pyrf_evlist__str, 1850 }; 1851 1852 static int pyrf_evlist__setup_types(void) 1853 { 1854 pyrf_evlist__type.tp_new = PyType_GenericNew; 1855 return PyType_Ready(&pyrf_evlist__type); 1856 } 1857 1858 #define PERF_CONST(name) { #name, PERF_##name } 1859 1860 struct perf_constant { 1861 const char *name; 1862 int value; 1863 }; 1864 1865 static const struct perf_constant perf__constants[] = { 1866 PERF_CONST(TYPE_HARDWARE), 1867 PERF_CONST(TYPE_SOFTWARE), 1868 PERF_CONST(TYPE_TRACEPOINT), 1869 PERF_CONST(TYPE_HW_CACHE), 1870 PERF_CONST(TYPE_RAW), 1871 PERF_CONST(TYPE_BREAKPOINT), 1872 1873 PERF_CONST(COUNT_HW_CPU_CYCLES), 1874 PERF_CONST(COUNT_HW_INSTRUCTIONS), 1875 PERF_CONST(COUNT_HW_CACHE_REFERENCES), 1876 PERF_CONST(COUNT_HW_CACHE_MISSES), 1877 PERF_CONST(COUNT_HW_BRANCH_INSTRUCTIONS), 1878 PERF_CONST(COUNT_HW_BRANCH_MISSES), 1879 PERF_CONST(COUNT_HW_BUS_CYCLES), 1880 PERF_CONST(COUNT_HW_CACHE_L1D), 1881 PERF_CONST(COUNT_HW_CACHE_L1I), 1882 PERF_CONST(COUNT_HW_CACHE_LL), 1883 PERF_CONST(COUNT_HW_CACHE_DTLB), 1884 PERF_CONST(COUNT_HW_CACHE_ITLB), 1885 PERF_CONST(COUNT_HW_CACHE_BPU), 1886 PERF_CONST(COUNT_HW_CACHE_OP_READ), 1887 PERF_CONST(COUNT_HW_CACHE_OP_WRITE), 1888 PERF_CONST(COUNT_HW_CACHE_OP_PREFETCH), 1889 PERF_CONST(COUNT_HW_CACHE_RESULT_ACCESS), 1890 PERF_CONST(COUNT_HW_CACHE_RESULT_MISS), 1891 1892 PERF_CONST(COUNT_HW_STALLED_CYCLES_FRONTEND), 1893 PERF_CONST(COUNT_HW_STALLED_CYCLES_BACKEND), 1894 1895 PERF_CONST(COUNT_SW_CPU_CLOCK), 1896 PERF_CONST(COUNT_SW_TASK_CLOCK), 1897 PERF_CONST(COUNT_SW_PAGE_FAULTS), 1898 PERF_CONST(COUNT_SW_CONTEXT_SWITCHES), 1899 PERF_CONST(COUNT_SW_CPU_MIGRATIONS), 1900 PERF_CONST(COUNT_SW_PAGE_FAULTS_MIN), 1901 PERF_CONST(COUNT_SW_PAGE_FAULTS_MAJ), 1902 PERF_CONST(COUNT_SW_ALIGNMENT_FAULTS), 1903 PERF_CONST(COUNT_SW_EMULATION_FAULTS), 1904 PERF_CONST(COUNT_SW_DUMMY), 1905 1906 PERF_CONST(SAMPLE_IP), 1907 PERF_CONST(SAMPLE_TID), 1908 PERF_CONST(SAMPLE_TIME), 1909 PERF_CONST(SAMPLE_ADDR), 1910 PERF_CONST(SAMPLE_READ), 1911 PERF_CONST(SAMPLE_CALLCHAIN), 1912 PERF_CONST(SAMPLE_ID), 1913 PERF_CONST(SAMPLE_CPU), 1914 PERF_CONST(SAMPLE_PERIOD), 1915 PERF_CONST(SAMPLE_STREAM_ID), 1916 PERF_CONST(SAMPLE_RAW), 1917 1918 PERF_CONST(FORMAT_TOTAL_TIME_ENABLED), 1919 PERF_CONST(FORMAT_TOTAL_TIME_RUNNING), 1920 PERF_CONST(FORMAT_ID), 1921 PERF_CONST(FORMAT_GROUP), 1922 1923 PERF_CONST(RECORD_MMAP), 1924 PERF_CONST(RECORD_LOST), 1925 PERF_CONST(RECORD_COMM), 1926 PERF_CONST(RECORD_EXIT), 1927 PERF_CONST(RECORD_THROTTLE), 1928 PERF_CONST(RECORD_UNTHROTTLE), 1929 PERF_CONST(RECORD_FORK), 1930 PERF_CONST(RECORD_READ), 1931 PERF_CONST(RECORD_SAMPLE), 1932 PERF_CONST(RECORD_MMAP2), 1933 PERF_CONST(RECORD_AUX), 1934 PERF_CONST(RECORD_ITRACE_START), 1935 PERF_CONST(RECORD_LOST_SAMPLES), 1936 PERF_CONST(RECORD_SWITCH), 1937 PERF_CONST(RECORD_SWITCH_CPU_WIDE), 1938 1939 PERF_CONST(RECORD_MISC_SWITCH_OUT), 1940 { .name = NULL, }, 1941 }; 1942 1943 static PyObject *pyrf__tracepoint(struct pyrf_evsel *pevsel, 1944 PyObject *args, PyObject *kwargs) 1945 { 1946 static char *kwlist[] = { "sys", "name", NULL }; 1947 char *sys = NULL; 1948 char *name = NULL; 1949 1950 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ss", kwlist, 1951 &sys, &name)) 1952 return NULL; 1953 1954 return PyLong_FromLong(tp_pmu__id(sys, name)); 1955 } 1956 1957 static PyObject *pyrf_evsel__from_evsel(struct evsel *evsel) 1958 { 1959 struct pyrf_evsel *pevsel = PyObject_New(struct pyrf_evsel, &pyrf_evsel__type); 1960 1961 if (!pevsel) 1962 return NULL; 1963 1964 memset(&pevsel->evsel, 0, sizeof(pevsel->evsel)); 1965 evsel__init(&pevsel->evsel, &evsel->core.attr, evsel->core.idx); 1966 1967 evsel__clone(&pevsel->evsel, evsel); 1968 if (evsel__is_group_leader(evsel)) 1969 evsel__set_leader(&pevsel->evsel, &pevsel->evsel); 1970 return (PyObject *)pevsel; 1971 } 1972 1973 static int evlist__pos(struct evlist *evlist, struct evsel *evsel) 1974 { 1975 struct evsel *pos; 1976 int idx = 0; 1977 1978 evlist__for_each_entry(evlist, pos) { 1979 if (evsel == pos) 1980 return idx; 1981 idx++; 1982 } 1983 return -1; 1984 } 1985 1986 static struct evsel *evlist__at(struct evlist *evlist, int idx) 1987 { 1988 struct evsel *pos; 1989 int idx2 = 0; 1990 1991 evlist__for_each_entry(evlist, pos) { 1992 if (idx == idx2) 1993 return pos; 1994 idx2++; 1995 } 1996 return NULL; 1997 } 1998 1999 static PyObject *pyrf_evlist__from_evlist(struct evlist *evlist) 2000 { 2001 struct pyrf_evlist *pevlist = PyObject_New(struct pyrf_evlist, &pyrf_evlist__type); 2002 struct evsel *pos; 2003 struct rb_node *node; 2004 2005 if (!pevlist) 2006 return NULL; 2007 2008 memset(&pevlist->evlist, 0, sizeof(pevlist->evlist)); 2009 evlist__init(&pevlist->evlist, evlist->core.all_cpus, evlist->core.threads); 2010 evlist__for_each_entry(evlist, pos) { 2011 struct pyrf_evsel *pevsel = (void *)pyrf_evsel__from_evsel(pos); 2012 2013 evlist__add(&pevlist->evlist, &pevsel->evsel); 2014 } 2015 evlist__for_each_entry(&pevlist->evlist, pos) { 2016 struct evsel *leader = evsel__leader(pos); 2017 2018 if (pos != leader) { 2019 int idx = evlist__pos(evlist, leader); 2020 2021 if (idx >= 0) 2022 evsel__set_leader(pos, evlist__at(&pevlist->evlist, idx)); 2023 else if (leader == NULL) 2024 evsel__set_leader(pos, pos); 2025 } 2026 2027 leader = pos->metric_leader; 2028 2029 if (pos != leader) { 2030 int idx = evlist__pos(evlist, leader); 2031 2032 if (idx >= 0) 2033 pos->metric_leader = evlist__at(&pevlist->evlist, idx); 2034 else if (leader == NULL) 2035 pos->metric_leader = pos; 2036 } 2037 } 2038 metricgroup__copy_metric_events(&pevlist->evlist, /*cgrp=*/NULL, 2039 &pevlist->evlist.metric_events, 2040 &evlist->metric_events); 2041 for (node = rb_first_cached(&pevlist->evlist.metric_events.entries); node; 2042 node = rb_next(node)) { 2043 struct metric_event *me = container_of(node, struct metric_event, nd); 2044 struct list_head *mpos; 2045 int idx = evlist__pos(evlist, me->evsel); 2046 2047 if (idx >= 0) 2048 me->evsel = evlist__at(&pevlist->evlist, idx); 2049 list_for_each(mpos, &me->head) { 2050 struct metric_expr *e = container_of(mpos, struct metric_expr, nd); 2051 2052 for (int j = 0; e->metric_events[j]; j++) { 2053 idx = evlist__pos(evlist, e->metric_events[j]); 2054 if (idx >= 0) 2055 e->metric_events[j] = evlist__at(&pevlist->evlist, idx); 2056 } 2057 } 2058 } 2059 return (PyObject *)pevlist; 2060 } 2061 2062 static PyObject *pyrf__parse_events(PyObject *self, PyObject *args) 2063 { 2064 const char *input; 2065 struct evlist evlist = {}; 2066 struct parse_events_error err; 2067 PyObject *result; 2068 PyObject *pcpus = NULL, *pthreads = NULL; 2069 struct perf_cpu_map *cpus; 2070 struct perf_thread_map *threads; 2071 2072 if (!PyArg_ParseTuple(args, "s|OO", &input, &pcpus, &pthreads)) 2073 return NULL; 2074 2075 threads = pthreads ? ((struct pyrf_thread_map *)pthreads)->threads : NULL; 2076 cpus = pcpus ? ((struct pyrf_cpu_map *)pcpus)->cpus : NULL; 2077 2078 parse_events_error__init(&err); 2079 evlist__init(&evlist, cpus, threads); 2080 if (parse_events(&evlist, input, &err)) { 2081 parse_events_error__print(&err, input); 2082 PyErr_SetFromErrno(PyExc_OSError); 2083 return NULL; 2084 } 2085 result = pyrf_evlist__from_evlist(&evlist); 2086 evlist__exit(&evlist); 2087 return result; 2088 } 2089 2090 static PyObject *pyrf__parse_metrics(PyObject *self, PyObject *args) 2091 { 2092 const char *input, *pmu = NULL; 2093 struct evlist evlist = {}; 2094 PyObject *result; 2095 PyObject *pcpus = NULL, *pthreads = NULL; 2096 struct perf_cpu_map *cpus; 2097 struct perf_thread_map *threads; 2098 int ret; 2099 2100 if (!PyArg_ParseTuple(args, "s|sOO", &input, &pmu, &pcpus, &pthreads)) 2101 return NULL; 2102 2103 threads = pthreads ? ((struct pyrf_thread_map *)pthreads)->threads : NULL; 2104 cpus = pcpus ? ((struct pyrf_cpu_map *)pcpus)->cpus : NULL; 2105 2106 evlist__init(&evlist, cpus, threads); 2107 ret = metricgroup__parse_groups(&evlist, pmu ?: "all", input, 2108 /*metric_no_group=*/ false, 2109 /*metric_no_merge=*/ false, 2110 /*metric_no_threshold=*/ true, 2111 /*user_requested_cpu_list=*/ NULL, 2112 /*system_wide=*/true, 2113 /*hardware_aware_grouping=*/ false); 2114 if (ret) { 2115 errno = -ret; 2116 PyErr_SetFromErrno(PyExc_OSError); 2117 return NULL; 2118 } 2119 result = pyrf_evlist__from_evlist(&evlist); 2120 evlist__exit(&evlist); 2121 return result; 2122 } 2123 2124 static PyObject *pyrf__metrics_groups(const struct pmu_metric *pm) 2125 { 2126 PyObject *groups = PyList_New(/*len=*/0); 2127 const char *mg = pm->metric_group; 2128 2129 if (!groups) 2130 return NULL; 2131 2132 while (mg) { 2133 PyObject *val = NULL; 2134 const char *sep = strchr(mg, ';'); 2135 size_t len = sep ? (size_t)(sep - mg) : strlen(mg); 2136 2137 if (len > 0) { 2138 val = PyUnicode_FromStringAndSize(mg, len); 2139 if (val) 2140 PyList_Append(groups, val); 2141 2142 Py_XDECREF(val); 2143 } 2144 mg = sep ? sep + 1 : NULL; 2145 } 2146 return groups; 2147 } 2148 2149 static int pyrf__metrics_cb(const struct pmu_metric *pm, 2150 const struct pmu_metrics_table *table __maybe_unused, 2151 void *vdata) 2152 { 2153 PyObject *py_list = vdata; 2154 PyObject *dict = PyDict_New(); 2155 PyObject *key = dict ? PyUnicode_FromString("MetricGroup") : NULL; 2156 PyObject *value = key ? pyrf__metrics_groups(pm) : NULL; 2157 2158 if (!value || PyDict_SetItem(dict, key, value) != 0) { 2159 Py_XDECREF(key); 2160 Py_XDECREF(value); 2161 Py_XDECREF(dict); 2162 return -ENOMEM; 2163 } 2164 2165 if (!add_to_dict(dict, "MetricName", pm->metric_name) || 2166 !add_to_dict(dict, "PMU", pm->pmu) || 2167 !add_to_dict(dict, "MetricExpr", pm->metric_expr) || 2168 !add_to_dict(dict, "MetricThreshold", pm->metric_threshold) || 2169 !add_to_dict(dict, "ScaleUnit", pm->unit) || 2170 !add_to_dict(dict, "Compat", pm->compat) || 2171 !add_to_dict(dict, "BriefDescription", pm->desc) || 2172 !add_to_dict(dict, "PublicDescription", pm->long_desc) || 2173 PyList_Append(py_list, dict) != 0) { 2174 Py_DECREF(dict); 2175 return -ENOMEM; 2176 } 2177 Py_DECREF(dict); 2178 return 0; 2179 } 2180 2181 static PyObject *pyrf__metrics(PyObject *self, PyObject *args) 2182 { 2183 const struct pmu_metrics_table *table = pmu_metrics_table__find(); 2184 PyObject *list = PyList_New(/*len=*/0); 2185 int ret; 2186 2187 if (!list) 2188 return NULL; 2189 2190 ret = pmu_metrics_table__for_each_metric(table, pyrf__metrics_cb, list); 2191 if (!ret) 2192 ret = pmu_for_each_sys_metric(pyrf__metrics_cb, list); 2193 2194 if (ret) { 2195 Py_DECREF(list); 2196 errno = -ret; 2197 PyErr_SetFromErrno(PyExc_OSError); 2198 return NULL; 2199 } 2200 return list; 2201 } 2202 2203 static PyMethodDef perf__methods[] = { 2204 { 2205 .ml_name = "metrics", 2206 .ml_meth = (PyCFunction) pyrf__metrics, 2207 .ml_flags = METH_NOARGS, 2208 .ml_doc = PyDoc_STR( 2209 "Returns a list of metrics represented as string values in dictionaries.") 2210 }, 2211 { 2212 .ml_name = "tracepoint", 2213 .ml_meth = (PyCFunction) pyrf__tracepoint, 2214 .ml_flags = METH_VARARGS | METH_KEYWORDS, 2215 .ml_doc = PyDoc_STR("Get tracepoint config.") 2216 }, 2217 { 2218 .ml_name = "parse_events", 2219 .ml_meth = (PyCFunction) pyrf__parse_events, 2220 .ml_flags = METH_VARARGS, 2221 .ml_doc = PyDoc_STR("Parse a string of events and return an evlist.") 2222 }, 2223 { 2224 .ml_name = "parse_metrics", 2225 .ml_meth = (PyCFunction) pyrf__parse_metrics, 2226 .ml_flags = METH_VARARGS, 2227 .ml_doc = PyDoc_STR( 2228 "Parse a string of metrics or metric groups and return an evlist.") 2229 }, 2230 { 2231 .ml_name = "pmus", 2232 .ml_meth = (PyCFunction) pyrf__pmus, 2233 .ml_flags = METH_NOARGS, 2234 .ml_doc = PyDoc_STR("Returns a sequence of pmus.") 2235 }, 2236 { .ml_name = NULL, } 2237 }; 2238 2239 PyMODINIT_FUNC PyInit_perf(void) 2240 { 2241 PyObject *obj; 2242 int i; 2243 PyObject *dict; 2244 static struct PyModuleDef moduledef = { 2245 PyModuleDef_HEAD_INIT, 2246 "perf", /* m_name */ 2247 "", /* m_doc */ 2248 -1, /* m_size */ 2249 perf__methods, /* m_methods */ 2250 NULL, /* m_reload */ 2251 NULL, /* m_traverse */ 2252 NULL, /* m_clear */ 2253 NULL, /* m_free */ 2254 }; 2255 PyObject *module = PyModule_Create(&moduledef); 2256 2257 if (module == NULL || 2258 pyrf_event__setup_types() < 0 || 2259 pyrf_evlist__setup_types() < 0 || 2260 pyrf_evsel__setup_types() < 0 || 2261 pyrf_thread_map__setup_types() < 0 || 2262 pyrf_cpu_map__setup_types() < 0 || 2263 pyrf_pmu_iterator__setup_types() < 0 || 2264 pyrf_pmu__setup_types() < 0 || 2265 pyrf_counts_values__setup_types() < 0) 2266 return module; 2267 2268 /* The page_size is placed in util object. */ 2269 page_size = sysconf(_SC_PAGE_SIZE); 2270 2271 Py_INCREF(&pyrf_evlist__type); 2272 PyModule_AddObject(module, "evlist", (PyObject*)&pyrf_evlist__type); 2273 2274 Py_INCREF(&pyrf_evsel__type); 2275 PyModule_AddObject(module, "evsel", (PyObject*)&pyrf_evsel__type); 2276 2277 Py_INCREF(&pyrf_mmap_event__type); 2278 PyModule_AddObject(module, "mmap_event", (PyObject *)&pyrf_mmap_event__type); 2279 2280 Py_INCREF(&pyrf_lost_event__type); 2281 PyModule_AddObject(module, "lost_event", (PyObject *)&pyrf_lost_event__type); 2282 2283 Py_INCREF(&pyrf_comm_event__type); 2284 PyModule_AddObject(module, "comm_event", (PyObject *)&pyrf_comm_event__type); 2285 2286 Py_INCREF(&pyrf_task_event__type); 2287 PyModule_AddObject(module, "task_event", (PyObject *)&pyrf_task_event__type); 2288 2289 Py_INCREF(&pyrf_throttle_event__type); 2290 PyModule_AddObject(module, "throttle_event", (PyObject *)&pyrf_throttle_event__type); 2291 2292 Py_INCREF(&pyrf_task_event__type); 2293 PyModule_AddObject(module, "task_event", (PyObject *)&pyrf_task_event__type); 2294 2295 Py_INCREF(&pyrf_read_event__type); 2296 PyModule_AddObject(module, "read_event", (PyObject *)&pyrf_read_event__type); 2297 2298 Py_INCREF(&pyrf_sample_event__type); 2299 PyModule_AddObject(module, "sample_event", (PyObject *)&pyrf_sample_event__type); 2300 2301 Py_INCREF(&pyrf_context_switch_event__type); 2302 PyModule_AddObject(module, "switch_event", (PyObject *)&pyrf_context_switch_event__type); 2303 2304 Py_INCREF(&pyrf_thread_map__type); 2305 PyModule_AddObject(module, "thread_map", (PyObject*)&pyrf_thread_map__type); 2306 2307 Py_INCREF(&pyrf_cpu_map__type); 2308 PyModule_AddObject(module, "cpu_map", (PyObject*)&pyrf_cpu_map__type); 2309 2310 Py_INCREF(&pyrf_counts_values__type); 2311 PyModule_AddObject(module, "counts_values", (PyObject *)&pyrf_counts_values__type); 2312 2313 dict = PyModule_GetDict(module); 2314 if (dict == NULL) 2315 goto error; 2316 2317 for (i = 0; perf__constants[i].name != NULL; i++) { 2318 obj = PyLong_FromLong(perf__constants[i].value); 2319 if (obj == NULL) 2320 goto error; 2321 PyDict_SetItemString(dict, perf__constants[i].name, obj); 2322 Py_DECREF(obj); 2323 } 2324 2325 error: 2326 if (PyErr_Occurred()) 2327 PyErr_SetString(PyExc_ImportError, "perf: Init failed!"); 2328 return module; 2329 } 2330