Lines matching refs:cpu in tools/perf/util/cpumap.c

79 if (data->cpus_data.cpu[i] == (u16) -1)  in cpu_map__from_entries()
80 RC_CHK_ACCESS(map)->map[i].cpu = -1; in cpu_map__from_entries()
82 RC_CHK_ACCESS(map)->map[i].cpu = (int) data->cpus_data.cpu[i]; in cpu_map__from_entries()
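The (u16) -1 check above exists because 0xffff is the "no CPU" sentinel in the incoming data: assigned straight into an int field it would zero-extend to 65535 instead of -1, which is why cpu_map__from_entries() special-cases it. A minimal standalone sketch of the pitfall, using uint16_t in place of perf's u16 (nothing here is perf code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t raw = (uint16_t)-1;	/* 0xffff, the "no cpu" sentinel */
	int wrong = raw;		/* zero-extends to 65535 */
	int right = (raw == (uint16_t)-1) ? -1 : (int)raw;

	printf("wrong=%d right=%d\n", wrong, right);	/* wrong=65535 right=-1 */
	return 0;
}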
106 int cpu; in cpu_map__from_mask() local
109 for_each_set_bit(cpu, local_copy, 64) in cpu_map__from_mask()
110 RC_CHK_ACCESS(map)->map[j++].cpu = cpu + cpus_per_i; in cpu_map__from_mask()
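cpu_map__from_mask() decodes each 64-bit mask word with for_each_set_bit() and offsets every bit index by the word's base CPU (cpus_per_i above). A standalone sketch of the same decode, assuming the mask arrives as an array of uint64_t words; __builtin_ctzll (GCC/Clang) stands in for the kernel's bitmap helpers:

#include <stdint.h>
#include <stdio.h>

/* Decode the set bits of each 64-bit word into absolute CPU numbers. */
static void decode_mask(const uint64_t *words, int nwords)
{
	for (int i = 0; i < nwords; i++) {
		uint64_t w = words[i];
		int base = i * 64;	/* plays the role of cpus_per_i */

		while (w) {
			int bit = __builtin_ctzll(w);	/* lowest set bit */

			printf("cpu %d\n", base + bit);
			w &= w - 1;	/* clear that bit */
		}
	}
}

int main(void)
{
	uint64_t mask[2] = { 0x5ULL, 0x1ULL };	/* cpus 0, 2 and 64 */

	decode_mask(mask, 2);
	return 0;
}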
127 RC_CHK_ACCESS(map)->map[i++].cpu = -1; in cpu_map__from_range()
129 for (int cpu = data->range_cpu_data.start_cpu; cpu <= data->range_cpu_data.end_cpu; in cpu_map__from_range() local
130 i++, cpu++) in cpu_map__from_range()
131 RC_CHK_ACCESS(map)->map[i].cpu = cpu; in cpu_map__from_range()
167 RC_CHK_ACCESS(cpus)->map[i].cpu = -1; in perf_cpu_map__empty_new()
188 static int cpu__get_topology_int(int cpu, const char *name, int *value) in cpu__get_topology_int() argument
193 "devices/system/cpu/cpu%d/topology/%s", cpu, name); in cpu__get_topology_int()
198 int cpu__get_socket_id(struct perf_cpu cpu) in cpu__get_socket_id() argument
200 int value, ret = cpu__get_topology_int(cpu.cpu, "physical_package_id", &value); in cpu__get_socket_id()
204 struct aggr_cpu_id aggr_cpu_id__socket(struct perf_cpu cpu, void *data __maybe_unused) in aggr_cpu_id__socket() argument
208 id.socket = cpu__get_socket_id(cpu); in aggr_cpu_id__socket()
240 struct perf_cpu cpu; in cpu_aggr_map__new() local
249 perf_cpu_map__for_each_cpu(cpu, idx, cpus) { in cpu_aggr_map__new()
251 struct aggr_cpu_id cpu_id = get_id(cpu, data); in cpu_aggr_map__new()
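cpu_aggr_map__new() walks the CPU map with perf_cpu_map__for_each_cpu(), asks the caller-supplied get_id callback for each CPU's aggregation ID, and keeps only the distinct IDs. A simplified sketch of that dedup-via-callback shape over plain ints (socket_of is an invented stand-in for the real aggr_cpu_id__* getters):

#include <stdbool.h>
#include <stdio.h>

typedef int (*get_id_fn)(int cpu, void *data);

/* Stand-in id getter: pretend each socket holds four cpus. */
static int socket_of(int cpu, void *data) { (void)data; return cpu / 4; }

/* Collect the distinct ids produced by get_id over a cpu list. */
static int uniq_ids(const int *cpus, int n, get_id_fn get_id,
		    void *data, int *out)
{
	int nr = 0;

	for (int i = 0; i < n; i++) {
		int id = get_id(cpus[i], data);
		bool seen = false;

		for (int j = 0; j < nr; j++)
			if (out[j] == id)
				seen = true;
		if (!seen)
			out[nr++] = id;
	}
	return nr;
}

int main(void)
{
	int cpus[] = { 0, 1, 4, 5, 6 };
	int ids[5];
	int nr = uniq_ids(cpus, 5, socket_of, NULL, ids);

	for (int i = 0; i < nr; i++)
		printf("aggr id %d\n", ids[i]);	/* prints 0 and 1 */
	return 0;
}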
282 int cpu__get_die_id(struct perf_cpu cpu) in cpu__get_die_id() argument
284 int value, ret = cpu__get_topology_int(cpu.cpu, "die_id", &value); in cpu__get_die_id()
289 struct aggr_cpu_id aggr_cpu_id__die(struct perf_cpu cpu, void *data) in aggr_cpu_id__die() argument
294 die = cpu__get_die_id(cpu); in aggr_cpu_id__die()
304 id = aggr_cpu_id__socket(cpu, data); in aggr_cpu_id__die()
312 int cpu__get_cluster_id(struct perf_cpu cpu) in cpu__get_cluster_id() argument
314 int value, ret = cpu__get_topology_int(cpu.cpu, "cluster_id", &value); in cpu__get_cluster_id()
319 struct aggr_cpu_id aggr_cpu_id__cluster(struct perf_cpu cpu, void *data) in aggr_cpu_id__cluster() argument
321 int cluster = cpu__get_cluster_id(cpu); in aggr_cpu_id__cluster()
328 id = aggr_cpu_id__die(cpu, data); in aggr_cpu_id__cluster()
336 int cpu__get_core_id(struct perf_cpu cpu) in cpu__get_core_id() argument
338 int value, ret = cpu__get_topology_int(cpu.cpu, "core_id", &value); in cpu__get_core_id()
342 struct aggr_cpu_id aggr_cpu_id__core(struct perf_cpu cpu, void *data) in aggr_cpu_id__core() argument
345 int core = cpu__get_core_id(cpu); in aggr_cpu_id__core()
348 id = aggr_cpu_id__cluster(cpu, data); in aggr_cpu_id__core()
361 struct aggr_cpu_id aggr_cpu_id__cpu(struct perf_cpu cpu, void *data) in aggr_cpu_id__cpu() argument
366 id = aggr_cpu_id__core(cpu, data); in aggr_cpu_id__cpu()
370 id.cpu = cpu; in aggr_cpu_id__cpu()
375 struct aggr_cpu_id aggr_cpu_id__node(struct perf_cpu cpu, void *data __maybe_unused) in aggr_cpu_id__node() argument
379 id.node = cpu__get_node(cpu); in aggr_cpu_id__node()
383 struct aggr_cpu_id aggr_cpu_id__global(struct perf_cpu cpu, void *data __maybe_unused) in aggr_cpu_id__global() argument
388 cpu.cpu = 0; in aggr_cpu_id__global()
389 id.cpu = cpu; in aggr_cpu_id__global()
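The aggr_cpu_id__* constructors above nest: aggr_cpu_id__cpu() calls __core(), which calls __cluster(), then __die(), then __socket(), so an ID at any granularity also carries every coarser level and whole tuples can be compared at once. A compressed sketch of that layering with stubbed topology lookups (the field names echo perf's struct aggr_cpu_id, but the get_* functions are invented for illustration):

#include <stdio.h>

struct aggr_id { int socket, die, cluster, core, cpu; };

/* Stub topology lookups; the real code reads sysfs per CPU. */
static int get_socket(int cpu)  { return cpu / 8; }
static int get_die(int cpu)     { return cpu / 4; }
static int get_cluster(int cpu) { return cpu / 2; }
static int get_core(int cpu)    { return cpu; }

static struct aggr_id id_socket(int cpu)
{
	struct aggr_id id = { .socket = get_socket(cpu),
			      .die = -1, .cluster = -1, .core = -1, .cpu = -1 };
	return id;
}

static struct aggr_id id_die(int cpu)
{
	struct aggr_id id = id_socket(cpu);	/* inherit the coarser level */
	id.die = get_die(cpu);
	return id;
}

static struct aggr_id id_cluster(int cpu)
{
	struct aggr_id id = id_die(cpu);
	id.cluster = get_cluster(cpu);
	return id;
}

static struct aggr_id id_core(int cpu)
{
	struct aggr_id id = id_cluster(cpu);
	id.core = get_core(cpu);
	return id;
}

static struct aggr_id id_cpu(int cpu)
{
	struct aggr_id id = id_core(cpu);
	id.cpu = cpu;
	return id;
}

int main(void)
{
	struct aggr_id id = id_cpu(5);

	printf("socket=%d die=%d cluster=%d core=%d cpu=%d\n",
	       id.socket, id.die, id.cluster, id.core, id.cpu);
	return 0;
}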
433 max_cpu_num.cpu = 4096; in set_max_cpu_num()
434 max_present_cpu_num.cpu = 4096; in set_max_cpu_num()
447 ret = get_max_num(path, &max_cpu_num.cpu); in set_max_cpu_num()
458 ret = get_max_num(path, &max_present_cpu_num.cpu); in set_max_cpu_num()
462 pr_err("Failed to read max cpus, using default of %d\n", max_cpu_num.cpu); in set_max_cpu_num()
503 if (unlikely(!max_cpu_num.cpu)) in cpu__max_cpu()
511 if (unlikely(!max_present_cpu_num.cpu)) in cpu__max_present_cpu()
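set_max_cpu_num() starts both counters at a 4096 fallback and then overrides them from the sysfs possible/present CPU lists; cpu__max_cpu() and cpu__max_present_cpu() lazily trigger that setup on first use. A standalone sketch of the parse-with-fallback idea, assuming list files shaped like "0-127" or "0,2-5" (the real get_max_num() differs in detail):

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return the highest CPU id in a sysfs cpu list + 1,
 * or the 4096 default perf also falls back to. */
static int max_cpus_from(const char *path)
{
	char buf[256];
	FILE *f = fopen(path, "r");
	int max = 4096;

	if (!f)
		return max;
	if (fgets(buf, sizeof(buf), f)) {
		char *p = buf + strlen(buf);

		while (p > buf && !isdigit((unsigned char)p[-1]))
			p--;		/* drop the trailing '\n' */
		while (p > buf && isdigit((unsigned char)p[-1]))
			p--;		/* back to start of the last number */
		if (isdigit((unsigned char)*p))
			max = atoi(p) + 1;	/* highest id + 1 */
	}
	fclose(f);
	return max;
}

int main(void)
{
	printf("max possible: %d\n",
	       max_cpus_from("/sys/devices/system/cpu/possible"));
	printf("max present:  %d\n",
	       max_cpus_from("/sys/devices/system/cpu/present"));
	return 0;
}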
518 int cpu__get_node(struct perf_cpu cpu) in cpu__get_node() argument
525 return cpunode_map[cpu.cpu]; in cpu__get_node()
535 cpunode_map = calloc(max_cpu_num.cpu, sizeof(int)); in init_cpunode_map()
541 for (i = 0; i < max_cpu_num.cpu; i++) in init_cpunode_map()
551 unsigned int cpu, mem; in cpu__setup_cpunode_map() local
590 if (dent2->d_type != DT_LNK || sscanf(dent2->d_name, "cpu%u", &cpu) < 1) in cpu__setup_cpunode_map()
592 cpunode_map[cpu] = mem; in cpu__setup_cpunode_map()
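cpu__setup_cpunode_map() scans /sys/devices/system/node/node<M> and, for every cpu<N> symlink inside, records cpunode_map[N] = M; cpu__get_node() is then a plain array lookup. A self-contained sketch of that scan; it pattern-matches entry names instead of checking d_type == DT_LNK, which is a simplification:

#include <dirent.h>
#include <stdio.h>

#define MAX_CPUS 4096

int main(void)
{
	static int cpunode_map[MAX_CPUS];
	DIR *nodes = opendir("/sys/devices/system/node");
	struct dirent *d1, *d2;
	unsigned int node, cpu;

	for (cpu = 0; cpu < MAX_CPUS; cpu++)
		cpunode_map[cpu] = -1;	/* unknown node by default */

	if (!nodes)
		return 1;
	while ((d1 = readdir(nodes))) {
		char path[512];
		DIR *cpus;

		if (sscanf(d1->d_name, "node%u", &node) < 1)
			continue;
		snprintf(path, sizeof(path),
			 "/sys/devices/system/node/%s", d1->d_name);
		cpus = opendir(path);
		if (!cpus)
			continue;
		while ((d2 = readdir(cpus))) {
			/* cpu<N> entries are symlinks back into .../cpu */
			if (sscanf(d2->d_name, "cpu%u", &cpu) == 1 &&
			    cpu < MAX_CPUS)
				cpunode_map[cpu] = node;
		}
		closedir(cpus);
	}
	closedir(nodes);

	printf("cpu0 is on node %d\n", cpunode_map[0]);
	return 0;
}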
609 struct perf_cpu cpu = { .cpu = INT_MAX }; in cpu_map__snprint() local
613 cpu = perf_cpu_map__cpu(map, i); in cpu_map__snprint()
620 perf_cpu_map__cpu(map, i).cpu); in cpu_map__snprint()
622 } else if (((i - start) != (cpu.cpu - perf_cpu_map__cpu(map, start).cpu)) || last) { in cpu_map__snprint()
628 perf_cpu_map__cpu(map, start).cpu); in cpu_map__snprint()
632 perf_cpu_map__cpu(map, start).cpu, perf_cpu_map__cpu(map, end).cpu); in cpu_map__snprint()
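cpu_map__snprint() compresses the sorted map into "0-3,8,10-11" form: a run keeps growing while the index delta equals the CPU-number delta (the test on line 622), and when it breaks, a single CPU prints bare while a longer run prints start-end. The same algorithm over a plain int array, as a sketch:

#include <stdio.h>

/* Print a sorted cpu list in perf's "0-3,8,10-11" style. */
static void print_ranges(const int *cpus, int n)
{
	int start = 0;
	const char *sep = "";

	for (int i = 0; i <= n; i++) {
		int last = (i == n);

		/* run unbroken while index delta == cpu delta */
		if (!last && (i - start) == (cpus[i] - cpus[start]))
			continue;
		if (i - start == 1)
			printf("%s%d", sep, cpus[start]);
		else
			printf("%s%d-%d", sep, cpus[start], cpus[i - 1]);
		sep = ",";
		start = i;
	}
	printf("\n");
}

int main(void)
{
	int cpus[] = { 0, 1, 2, 3, 8, 10, 11 };

	print_ranges(cpus, 7);	/* prints "0-3,8,10-11" */
	return 0;
}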
664 bitmap = zalloc(last_cpu.cpu / 8 + 1); in cpu_map__snprint_mask()
671 bitmap[c.cpu / 8] |= 1 << (c.cpu % 8); in cpu_map__snprint_mask()
673 for (int cpu = last_cpu.cpu / 4 * 4; cpu >= 0; cpu -= 4) { in cpu_map__snprint_mask() local
674 unsigned char bits = bitmap[cpu / 8]; in cpu_map__snprint_mask()
676 if (cpu % 8) in cpu_map__snprint_mask()
682 if ((cpu % 32) == 0 && cpu > 0) in cpu_map__snprint_mask()
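cpu_map__snprint_mask() sets one bit per CPU in a byte bitmap and then prints hex nibbles from the highest CPU down, using cpu % 8 to pick the high or low nibble of each byte and inserting a ',' every 32 CPUs. A standalone sketch of the same output format:

#include <stdio.h>
#include <stdlib.h>

/* Print cpus as a kernel-style hex mask, e.g. "8,00000005". */
static void print_mask(const int *cpus, int n, int last_cpu)
{
	unsigned char *bitmap = calloc(last_cpu / 8 + 1, 1);

	if (!bitmap)
		return;
	for (int i = 0; i < n; i++)
		bitmap[cpus[i] / 8] |= 1 << (cpus[i] % 8);

	/* walk nibbles from the highest cpu down, ',' every 32 cpus */
	for (int cpu = last_cpu / 4 * 4; cpu >= 0; cpu -= 4) {
		unsigned char bits = bitmap[cpu / 8];

		if (cpu % 8)
			bits >>= 4;	/* high nibble of this byte */
		printf("%x", bits & 0xf);
		if ((cpu % 32) == 0 && cpu > 0)
			printf(",");
	}
	printf("\n");
	free(bitmap);
}

int main(void)
{
	int cpus[] = { 0, 2, 35 };

	print_mask(cpus, 3, 35);	/* prints "8,00000005" */
	return 0;
}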
712 a->cpu.cpu == b->cpu.cpu; in aggr_cpu_id__equal()
725 a->cpu.cpu == -1; in aggr_cpu_id__is_empty()
739 .cpu = (struct perf_cpu){ .cpu = -1 }, in aggr_cpu_id__empty()
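aggr_cpu_id__empty() fills every level with -1 (including the nested struct perf_cpu), which is the sentinel that aggr_cpu_id__is_empty() tests field by field. A tiny sketch of the idiom with designated initializers, over a trimmed-down version of the struct:

#include <stdbool.h>
#include <stdio.h>

struct perf_cpu { int cpu; };
struct aggr_id  { int socket, die, core; struct perf_cpu cpu; };

static struct aggr_id aggr_id_empty(void)
{
	/* every level marked "unused", as in aggr_cpu_id__empty() */
	struct aggr_id id = {
		.socket = -1,
		.die = -1,
		.core = -1,
		.cpu = (struct perf_cpu){ .cpu = -1 },
	};
	return id;
}

static bool aggr_id_is_empty(const struct aggr_id *a)
{
	return a->socket == -1 && a->die == -1 &&
	       a->core == -1 && a->cpu.cpu == -1;
}

int main(void)
{
	struct aggr_id id = aggr_id_empty();

	printf("empty: %s\n", aggr_id_is_empty(&id) ? "yes" : "no");
	return 0;
}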