| /linux/tools/lib/perf/include/perf/ |
| H A D | cpumap.h | 52 LIBPERF_API struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx); 91 for ((idx) = 0, (cpu) = perf_cpu_map__cpu(cpus, idx); \ 93 (idx)++, (cpu) = perf_cpu_map__cpu(cpus, idx)) 96 for ((idx) = 0, (_cpu) = perf_cpu_map__cpu(cpus, idx); \ 98 (idx)++, (_cpu) = perf_cpu_map__cpu(cpus, idx)) \
|
| /linux/tools/perf/arch/x86/util/ |
| H A D | pmu.c | 197 cpu_adjust[pmu_snc] = perf_cpu_map__cpu(node_cpus, 0).cpu; in uncore_cha_imc_compute_cpu_adjust() 233 if (perf_cpu_map__cpu(pmu->cpus, 0).cpu != 0) { in gnr_uncore_cha_imc_adjust_cpumask_for_snc() 260 cpu_adjust = perf_cpu_map__cpu(adjusted[pmu_snc], idx).cpu - cpu.cpu; in gnr_uncore_cha_imc_adjust_cpumask_for_snc() 263 assert(perf_cpu_map__cpu(adjusted[pmu_snc], idx).cpu == in gnr_uncore_cha_imc_adjust_cpumask_for_snc()
|
| /linux/tools/perf/tests/ |
| H A D | event_update.c | 73 TEST_ASSERT_VAL("wrong cpus", perf_cpu_map__cpu(map, 0).cpu == 1); in process_event_cpus() 74 TEST_ASSERT_VAL("wrong cpus", perf_cpu_map__cpu(map, 1).cpu == 2); in process_event_cpus() 75 TEST_ASSERT_VAL("wrong cpus", perf_cpu_map__cpu(map, 2).cpu == 3); in process_event_cpus()
|
| H A D | bitmap.c | |
| H A D | mmap-basic.c | 65 CPU_SET(perf_cpu_map__cpu(cpus, 0).cpu, &cpu_set); in test__basic_mmap() 69 perf_cpu_map__cpu(cpus, 0).cpu, in test__basic_mmap()
|
| H A D | topology.c | 111 if (cpu__get_socket_id(perf_cpu_map__cpu(map, 0)) == -1) in check_cpu_topology()
|
| /linux/tools/perf/util/ |
| H A D | record.c | 102 if (perf_cpu_map__cpu(evlist->core.user_requested_cpus, 0).cpu < 0) in evlist__config() 244 cpu = perf_cpu_map__cpu(cpus, 0); in evlist__can_select_event() 248 cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, 0); in evlist__can_select_event()
|
| H A D | cpumap.c | 639 cpu = perf_cpu_map__cpu(map, i); in cpu_map__snprint() 646 perf_cpu_map__cpu(map, i).cpu); in cpu_map__snprint() 648 } else if (((i - start) != (cpu.cpu - perf_cpu_map__cpu(map, start).cpu)) || last) { in cpu_map__snprint() 654 perf_cpu_map__cpu(map, start).cpu); in cpu_map__snprint() 658 perf_cpu_map__cpu(map, start).cpu, perf_cpu_map__cpu(map, end).cpu); in cpu_map__snprint()
|
| H A D | perf_api_probe.c | 70 cpu = perf_cpu_map__cpu(cpus, 0); in perf_probe_api() 158 cpu = perf_cpu_map__cpu(cpus, 0); in perf_can_record_cpu_wide()
|
| H A D | bpf_counter.c | 339 int cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx).cpu; in bpf_program_profiler__install_pe() 627 key = perf_cpu_map__cpu(evsel->core.cpus, i).cpu; in bperf__load() 658 int cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx).cpu; in bperf__install_pe() 723 cpu = perf_cpu_map__cpu(evsel__cpus(evsel), i).cpu; in bperf__read()
|
| H A D | cpumap.h | 71 return perf_cpu_map__nr(cpus) == 1 && perf_cpu_map__cpu(cpus, 0).cpu == -1; in cpu_map__is_dummy()
|
| H A D | tool_pmu.c | 279 cpu = perf_cpu_map__cpu(evsel->core.cpus, idx); in evsel__tool_pmu_open() 537 struct perf_cpu cpu = perf_cpu_map__cpu(evsel->core.cpus, in evsel__tool_pmu_read()
|
| H A D | bpf_ftrace.c | 90 cpu = perf_cpu_map__cpu(ftrace->evlist->core.user_requested_cpus, i).cpu; in perf_ftrace__latency_prepare_bpf()
|
| H A D | stat.c | 309 struct perf_cpu cpu = perf_cpu_map__cpu(cpus, cpu_map_idx); in check_per_pkg() 425 struct perf_cpu cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx); in process_counter_values()
|
| H A D | mmap.c | 255 cpu = perf_cpu_map__cpu(cpu_map, idx); /* map c index to online cpu index */ in build_node_mask()
|
| H A D | cputopo.c | 413 if (load_numa_node(&tp->nodes[i], perf_cpu_map__cpu(node_map, i).cpu)) { in numa_topology__new()
|
| H A D | bpf_off_cpu.c | 241 cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu; in off_cpu_prepare()
|
| /linux/tools/lib/perf/ |
| H A D | evlist.c | 121 evsel->cpus = perf_cpu_map__new_int(perf_cpu_map__cpu(srcs[i], 0).cpu); in __perf_evlist__propagate_maps() 496 sid->cpu = perf_cpu_map__cpu(evsel->cpus, cpu); in perf_evsel__set_sid_idx() 544 struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->all_cpus, cpu_idx); in mmap_per_evsel()
|
| /linux/tools/perf/bench/ |
| H A D | futex-wake.c | 119 CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset); in block_threads()
|
| H A D | futex-lock-pi.c | 151 CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset); in create_threads()
|
| H A D | futex-hash.c | 195 CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset); in bench_futex_hash()
|
| H A D | futex-requeue.c | 146 CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset); in block_threads()
|
| H A D | futex-wake-parallel.c | 170 CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset); in block_threads()
|
| H A D | epoll-ctl.c | 264 CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, in do_threads()
|
| /linux/tools/lib/perf/Documentation/ |
| H A D | libperf.txt | 46 struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx);
|