Lines matching "period" and "scale" in tools/lib/perf/evsel.c

// SPDX-License-Identifier: GPL-2.0

in perf_evsel__init():
	INIT_LIST_HEAD(&evsel->node);
	INIT_LIST_HEAD(&evsel->per_stream_periods);
	evsel->attr = *attr;
	evsel->idx = idx;
	evsel->leader = evsel;
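
Taken together, these initializers mean a freshly created evsel starts out as
its own group leader. A minimal sketch, assuming the public libperf headers, of
producing an evsel in exactly this state:

#include <linux/perf_event.h>
#include <perf/evsel.h>

/* perf_evsel__new() allocates an evsel and runs the init above. */
static struct perf_evsel *make_cpu_clock_evsel(void)
{
	struct perf_event_attr attr = {
		.type   = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_CPU_CLOCK,
		.size   = sizeof(struct perf_event_attr),
	};

	return perf_evsel__new(&attr);
}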

in perf_evsel__exit():
	assert(evsel->fd == NULL);		/* If not, fds were not closed. */
	assert(evsel->mmap == NULL);		/* If not, munmap wasn't called. */
	assert(evsel->sample_id == NULL);	/* If not, free_id wasn't called. */
	perf_cpu_map__put(evsel->cpus);
	perf_cpu_map__put(evsel->pmu_cpus);
	perf_thread_map__put(evsel->threads);

in the FD() / MMAP() accessor macros:
	((int *)xyarray__entry(_evsel->fd, _cpu_map_idx, _thread))
	...
	(_evsel->mmap ? ((struct perf_mmap *) xyarray__entry(_evsel->mmap, _cpu_map_idx, _thread)) \

in perf_evsel__alloc_fd():
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
	...
	if (evsel->fd) {
		...
		*fd = -1;
	...
	return evsel->fd != NULL ? 0 : -ENOMEM;

in perf_evsel__alloc_mmap():
	evsel->mmap = xyarray__new(ncpus, nthreads, sizeof(struct perf_mmap));
	...
	return evsel->mmap != NULL ? 0 : -ENOMEM;

in get_group_fd():
	struct perf_evsel *leader = evsel->leader;
	...
	*group_fd = -1;
	...
	if (!leader->fd)
		return -ENOTCONN;
	...
	if (fd == NULL || *fd == -1)
		return -EBADF;
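
get_group_fd() resolves what the kernel expects in the group_fd argument of
perf_event_open(2): -1 for a group leader, the leader's own fd for every
sibling. A sketch of that contract with the raw syscall (perf_event_open(2)
has no libc wrapper; the two attrs here are placeholders):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>

static long open_group_pair(struct perf_event_attr *leader_attr,
			    struct perf_event_attr *sibling_attr,
			    pid_t pid, int cpu)
{
	/* the leader is opened with group_fd == -1 ... */
	long leader = syscall(SYS_perf_event_open, leader_attr, pid, cpu, -1, 0);

	if (leader < 0)
		return leader;
	/* ... and each sibling passes the leader's fd, so the kernel
	 * schedules the whole group on and off the PMU together */
	return syscall(SYS_perf_event_open, sibling_attr, pid, cpu,
		       (int)leader, 0);
}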

in perf_evsel__open():
	return -ENOMEM;
	...
	return -ENOMEM;
	...
	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
		return -ENOMEM;
	...
	for (thread = 0; thread < threads->nr; thread++) {
	...
	err = -EINVAL;
	...
	fd = sys_perf_event_open(&evsel->attr,
				 threads->map[thread].pid,
	...
	err = -errno;
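
The loop above opens one fd per (cpu, thread) pair. For orientation, a minimal
end-to-end use of the public API built on top of it, loosely following the
libperf documentation examples (counting CPU clock for the calling process):

#include <inttypes.h>
#include <stdio.h>
#include <linux/perf_event.h>
#include <perf/evsel.h>
#include <perf/threadmap.h>

static int count_cpu_clock(void)
{
	struct perf_event_attr attr = {
		.type   = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_CPU_CLOCK,
		.size   = sizeof(struct perf_event_attr),
	};
	struct perf_counts_values counts = { .val = 0 };
	struct perf_thread_map *threads;
	struct perf_evsel *evsel;
	int err = -1;

	threads = perf_thread_map__new_dummy();
	if (threads == NULL)
		return err;
	perf_thread_map__set_pid(threads, 0, 0);	/* pid 0: calling process */

	evsel = perf_evsel__new(&attr);
	if (evsel == NULL)
		goto out_threads;

	err = perf_evsel__open(evsel, NULL, threads);	/* NULL cpus: any CPU */
	if (err == 0) {
		perf_evsel__read(evsel, 0, 0, &counts);
		printf("count %" PRIu64 " enabled %" PRIu64 " running %" PRIu64 "\n",
		       counts.val, counts.ena, counts.run);
		perf_evsel__close(evsel);
	}
	perf_evsel__delete(evsel);
out_threads:
	perf_thread_map__put(threads);
	return err;
}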

in perf_evsel__close_fd_cpu():
	for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
	...
		*fd = -1;

in perf_evsel__close_fd():
	for (int idx = 0; idx < xyarray__max_x(evsel->fd); idx++)

in perf_evsel__free_fd():
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;

in perf_evsel__close():
	if (evsel->fd == NULL)

in perf_evsel__close_cpu():
	if (evsel->fd == NULL)

in perf_evsel__munmap():
	if (evsel->fd == NULL || evsel->mmap == NULL)
	...
	for (idx = 0; idx < xyarray__max_x(evsel->fd); idx++) {
		for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
	...
	xyarray__delete(evsel->mmap);
	evsel->mmap = NULL;

in perf_evsel__mmap():
	.mask = (pages * page_size) - 1,
	...
	if (evsel->fd == NULL || evsel->mmap)
		return -EINVAL;
	...
	if (perf_evsel__alloc_mmap(evsel, xyarray__max_x(evsel->fd), xyarray__max_y(evsel->fd)) < 0)
		return -ENOMEM;
	...
	for (idx = 0; idx < xyarray__max_x(evsel->fd); idx++) {
		for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
			...
			struct perf_cpu cpu = perf_cpu_map__cpu(evsel->cpus, idx);

in perf_evsel__mmap_base():
	return MMAP(evsel, cpu_map_idx, thread)->base;
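
perf_evsel__mmap() sizes each ring as pages * page_size, so the page count
must be a power of two for the mask above to work, and perf_evsel__mmap_base()
hands back the base of one (cpu, thread) ring, which starts with the
perf_event_mmap_page header. A small sketch, assuming an evsel that was
already opened as above:

#include <stdio.h>
#include <linux/perf_event.h>
#include <perf/evsel.h>

static void inspect_ring(struct perf_evsel *evsel)
{
	if (perf_evsel__mmap(evsel, 1) == 0) {	/* one data page per fd */
		struct perf_event_mmap_page *pc =
			perf_evsel__mmap_base(evsel, 0, 0);

		/* the header advertises, e.g., whether user-space rdpmc
		 * reads of the counter are possible */
		printf("rdpmc usable: %u\n", (unsigned int)pc->cap_user_rdpmc);
		perf_evsel__munmap(evsel);
	}
}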

in perf_evsel__read_size():
	u64 read_format = evsel->attr.read_format;
	...
	nr = evsel->nr_members;

in perf_evsel__read_group():
	u64 read_format = evsel->attr.read_format;
	...
	return -EINVAL;
	...
	return -ENOMEM;
	...
	return -errno;
	...
	count->ena = data[idx++];
	...
	count->run = data[idx++];
	...
	count->val = data[idx++];
	...
	count->id = data[idx++];
	...
	count->lost = data[idx++];
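
The data[idx++] stepping mirrors the buffer layout that perf_event_open(2)
documents for PERF_FORMAT_GROUP, roughly:

#include <linux/types.h>

/* Shape of the buffer parsed above; fields marked optional are present
 * only when the matching read_format bit is set, which is exactly what
 * the conditional idx increments account for. */
struct read_format {
	__u64 nr;		/* number of events in the group */
	__u64 time_enabled;	/* optional: PERF_FORMAT_TOTAL_TIME_ENABLED */
	__u64 time_running;	/* optional: PERF_FORMAT_TOTAL_TIME_RUNNING */
	struct {
		__u64 value;
		__u64 id;	/* optional: PERF_FORMAT_ID */
		__u64 lost;	/* optional: PERF_FORMAT_LOST */
	} cntr[];		/* nr entries */
};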

in perf_evsel__adjust_values():
	u64 read_format = evsel->attr.read_format;
	...
	count->val = buf[n++];
	...
	count->ena = buf[n++];
	...
	count->run = buf[n++];
	...
	count->id = buf[n++];
	...
	count->lost = buf[n++];

in perf_evsel__read():
	u64 read_format = evsel->attr.read_format;
	...
	return -EINVAL;
	...
	return -errno;

in perf_evsel__ioctl():
	return -1;

in perf_evsel__run_ioctl():
	for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {

in perf_evsel__enable_thread():
	perf_cpu_map__for_each_cpu(cpu, idx, evsel->cpus) {

in perf_evsel__enable():
	for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++)

in perf_evsel__disable():
	for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++)

in perf_evsel__apply_filter():
	for (i = 0; i < perf_cpu_map__nr(evsel->cpus) && !err; i++)

in perf_evsel__cpus():
	return evsel->cpus;

in perf_evsel__threads():
	return evsel->threads;

in perf_evsel__attr():
	return &evsel->attr;
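
The enable/disable helpers fan PERF_EVENT_IOC_ENABLE / PERF_EVENT_IOC_DISABLE
out to every fd of the evsel. A typical bracket around a measured region,
assuming the event was opened with attr.disabled = 1:

#include <perf/evsel.h>

static void measure(struct perf_evsel *evsel, void (*workload)(void))
{
	perf_evsel__enable(evsel);	/* ioctl on every (cpu, thread) fd */
	workload();
	perf_evsel__disable(evsel);
}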

in perf_evsel__alloc_id():
	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;
	...
	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;

in perf_evsel__free_id():
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	zfree(&evsel->id);
	evsel->ids = 0;
	...
	list_del_init(&pos->node);

in perf_evsel__attr_has_per_thread_sample_period():
	return (evsel->attr.sample_type & PERF_SAMPLE_READ) &&
	       (evsel->attr.sample_type & PERF_SAMPLE_TID) &&
	       evsel->attr.inherit;
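
For reference, a minimal attr that satisfies all three conditions of this
predicate, i.e. one whose samples carry per-thread read values across
inherited children:

#include <linux/perf_event.h>

static struct perf_event_attr per_thread_period_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_INSTRUCTIONS,
	.size		= sizeof(struct perf_event_attr),
	.sample_period	= 100000,
	.sample_type	= PERF_SAMPLE_READ | PERF_SAMPLE_TID,	/* both sampled */
	.inherit	= 1,					/* and inherited */
};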

in perf_sample_id__get_period_storage():
	return &sid->period;
	...
	head = &sid->periods[hash];
	...
	if (res->tid == tid)
		return &res->period;
	...
	if (sid->evsel == NULL)
	...
	INIT_LIST_HEAD(&res->node);
	res->tid = tid;
	...
	list_add_tail(&res->node, &sid->evsel->per_stream_periods);
	hlist_add_head(&res->hnode, &sid->periods[hash]);
	...
	return &res->period;
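
The function is a lookup-or-insert keyed by tid: return the matching hash
bucket entry if one exists, otherwise allocate one, link it into both the
per-sid hash table and the evsel-wide per_stream_periods list, and hand back
its period slot. A hedged sketch of a caller, assuming the internal libperf
header (sample_period is a placeholder value):

#include <stdbool.h>
#include <linux/types.h>
#include <internal/evsel.h>	/* internal libperf header */

static void account_period(struct perf_sample_id *sid, __u32 tid,
			   __u64 sample_period)
{
	/* per_thread == true: each tid gets its own slot, allocated on
	 * first sight */
	__u64 *period = perf_sample_id__get_period_storage(sid, tid, true);

	if (period != NULL)
		*period = sample_period;
}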

in perf_counts_values__scale():
	bool scale, __s8 *pscaled)
	...
	if (scale) {
		if (count->run == 0) {
			scaled = -1;
			count->val = 0;
		} else if (count->run < count->ena) {
			...
			count->val = (u64)((double)count->val * count->ena / count->run);
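
A worked example of the multiplexing compensation: when the event was on the
PMU (run) for only part of the time it was enabled (ena), the raw value is
extrapolated by ena/run, and *pscaled reports -1 (never ran), 1 (scaled), or
0 (ran the whole time):

#include <stdbool.h>
#include <linux/types.h>
#include <perf/evsel.h>

static void scale_demo(void)
{
	/* made-up numbers: enabled for 1000 ns, on the PMU for 250 ns */
	struct perf_counts_values c = { .val = 100, .ena = 1000, .run = 250 };
	__s8 scaled;

	perf_counts_values__scale(&c, true, &scaled);
	/* c.val is now (u64)(100.0 * 1000 / 250) == 400 and scaled == 1,
	 * flagging that the count was extrapolated */
}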