Lines Matching +full:data +full:- +full:path
1 // SPDX-License-Identifier: GPL-2.0
27 const struct perf_record_cpu_map_data *data)
34 return (data->mask32_data.long_size == 4)
35 ? (bit_word32 < data->mask32_data.nr) &&
36 (data->mask32_data.mask[bit_word32] & bit_mask32) != 0
37 : (bit_word64 < data->mask64_data.nr) &&
38 (data->mask64_data.mask[bit_word64] & bit_mask64) != 0;
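The ternary above selects between the 32-bit and the 64-bit mask layout carried in the same record. The word indices and bit masks it tests are not among the matched lines; presumably they are derived from the flat bit index i roughly as follows (illustrative sketch, not copied from the file):

	int   bit_word32 = i / 32;			/* which 32-bit word holds bit i */
	__u32 bit_mask32 = 1U << (i & 31);		/* bit position inside that word */
	int   bit_word64 = i / 64;			/* which 64-bit word holds bit i */
	__u64 bit_mask64 = ((__u64)1) << (i & 63);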
41 /* Read ith mask value from data into the given 64-bit sized bitmap */
42 static void perf_record_cpu_map_data__read_one_mask(const struct perf_record_cpu_map_data *data,
46 if (data->mask32_data.long_size == 4)
47 bitmap[0] = data->mask32_data.mask[i];
49 bitmap[0] = data->mask64_data.mask[i];
51 if (data->mask32_data.long_size == 4) {
52 bitmap[0] = data->mask32_data.mask[i];
56 bitmap[0] = (unsigned long)(data->mask64_data.mask[i] >> 32);
57 bitmap[1] = (unsigned long)data->mask64_data.mask[i];
59 bitmap[0] = (unsigned long)data->mask64_data.mask[i];
60 bitmap[1] = (unsigned long)(data->mask64_data.mask[i] >> 32);
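Lines 46-49 look like the straight copy taken when unsigned long is 64 bits wide; lines 51-60 look like the fallback for 32-bit longs, where each 64-bit mask word has to be split across two bitmap slots, with the two orderings presumably selected by a byte-order #if. A rough sketch of the split (values invented for illustration):

	static void example_split_mask_word(unsigned long bitmap[2])
	{
		__u64 word = 0x00000001deadbeefULL;

		/* Lines 59-60, presumably the little-endian case: */
		bitmap[0] = (unsigned long)word;		/* low 32 bits on a 32-bit-long target */
		bitmap[1] = (unsigned long)(word >> 32);	/* high 32 bits */
		/* Lines 56-57 presumably store the two halves the other way
		 * around for big-endian targets. */
	}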
65 static struct perf_cpu_map *cpu_map__from_entries(const struct perf_record_cpu_map_data *data)
69 map = perf_cpu_map__empty_new(data->cpus_data.nr);
73 for (unsigned int i = 0; i < data->cpus_data.nr; i++) {
75 * Special treatment for -1, which is not a real cpu number,
76 * and we need to use (int) -1 to initialize map[i],
79 if (data->cpus_data.cpu[i] == (u16) -1) {
80 RC_CHK_ACCESS(map)->map[i].cpu = -1;
81 } else if (data->cpus_data.cpu[i] < INT16_MAX) {
82 RC_CHK_ACCESS(map)->map[i].cpu = (int16_t) data->cpus_data.cpu[i];
84 pr_err("Invalid cpumap entry %u\n", data->cpus_data.cpu[i]);
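The special case on lines 79-82 exists because the entries in the event are 16-bit: the "any CPU" marker arrives as (u16)-1, i.e. 65535, and would stay 65535 if it were widened naively instead of being mapped back to -1. A minimal sketch of the distinction (hypothetical helper, not part of cpumap.c):

	#include <stdint.h>

	static int example_decode_cpu(uint16_t raw)
	{
		/* 65535 on the wire means "any CPU"; report it as -1, not 65535. */
		return raw == (uint16_t)-1 ? -1 : (int)raw;
	}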
93 static struct perf_cpu_map *cpu_map__from_mask(const struct perf_record_cpu_map_data *data)
96 int weight = 0, mask_nr = data->mask32_data.nr;
100 perf_record_cpu_map_data__read_one_mask(data, i, local_copy);
109 int cpus_per_i = (i * data->mask32_data.long_size * BITS_PER_BYTE);
112 perf_record_cpu_map_data__read_one_mask(data, i, local_copy);
115 RC_CHK_ACCESS(map)->map[j++].cpu = cpu + cpus_per_i;
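cpus_per_i on line 109 is the CPU number of bit 0 in mask word i, so every set bit found in the local copy is offset by it on line 115. A small worked example (values invented):

	static int example_cpu_from_mask_bit(void)
	{
		/* With long_size == 4, each mask word covers 32 CPUs, so bit 3
		 * of word 2 names CPU 2 * 32 + 3 == 67. */
		int long_size = 4, word = 2, bit = 3;

		return bit + word * long_size * 8;	/* 8 == BITS_PER_BYTE */
	}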
127 static struct perf_cpu_map *cpu_map__from_range(const struct perf_record_cpu_map_data *data)
132 map = perf_cpu_map__empty_new(data->range_cpu_data.end_cpu -
133 data->range_cpu_data.start_cpu + 1 + data->range_cpu_data.any_cpu);
137 if (data->range_cpu_data.any_cpu)
138 RC_CHK_ACCESS(map)->map[i++].cpu = -1;
140 for (int cpu = data->range_cpu_data.start_cpu; cpu <= data->range_cpu_data.end_cpu;
143 RC_CHK_ACCESS(map)->map[i].cpu = cpu;
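The range encoding carries only start_cpu, end_cpu and an any_cpu flag; the flag contributes the single leading -1 entry written on line 138 and also shows up in the size computed on lines 132-133. A quick sketch of the resulting entry count (hypothetical helper):

	static int example_range_entries(int start_cpu, int end_cpu, int any_cpu)
	{
		/* e.g. start 0, end 3, any_cpu 1 -> 5 entries: -1, 0, 1, 2, 3 */
		return end_cpu - start_cpu + 1 + any_cpu;
	}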
154 struct perf_cpu_map *cpu_map__new_data(const struct perf_record_cpu_map_data *data)
156 switch (data->type) {
158 return cpu_map__from_entries(data);
160 return cpu_map__from_mask(data);
162 return cpu_map__from_range(data);
164 pr_err("cpu_map__new_data unknown type %d\n", data->type);
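The case labels themselves are trimmed out of the match; as far as I can tell the three encodings dispatched here are PERF_CPU_MAP__CPUS, PERF_CPU_MAP__MASK and PERF_CPU_MAP__RANGE_CPUS from the libperf event header, and callers hand the whole record's data member to this single entry point. A rough caller sketch (hypothetical function name, assuming the usual perf_record_cpu_map layout of a header followed by data):

	static struct perf_cpu_map *example_decode(const struct perf_record_cpu_map *event)
	{
		return cpu_map__new_data(&event->data);
	}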
185 RC_CHK_ACCESS(cpus)->map[i].cpu = -1;
198 cpus->nr = nr;
200 cpus->map[i] = aggr_cpu_id__empty();
208 char path[PATH_MAX];
210 snprintf(path, PATH_MAX,
213 return sysfs__read_int(path, value);
222 struct aggr_cpu_id aggr_cpu_id__socket(struct perf_cpu cpu, void *data __maybe_unused)
235 if (a->node != b->node)
236 return a->node - b->node;
237 else if (a->socket != b->socket)
238 return a->socket - b->socket;
239 else if (a->die != b->die)
240 return a->die - b->die;
241 else if (a->cluster != b->cluster)
242 return a->cluster - b->cluster;
243 else if (a->cache_lvl != b->cache_lvl)
244 return a->cache_lvl - b->cache_lvl;
245 else if (a->cache != b->cache)
246 return a->cache - b->cache;
247 else if (a->core != b->core)
248 return a->core - b->core;
250 return a->thread_idx - b->thread_idx;
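The comparison is hierarchical: node first, then socket, die, cluster, cache level, cache, core and finally thread index, so sorting with it groups CPUs that share the outer topology levels; the qsort() call on line 294 below relies on exactly that. A minimal usage sketch (the comparator is static, so this only makes sense inside cpumap.c itself):

	static void example_sort_ids(struct aggr_cpu_id *ids, size_t nr)
	{
		qsort(ids, nr, sizeof(*ids), aggr_cpu_id__cmp);
	}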
255 void *data, bool needs_sort)
265 c->nr = 0;
269 struct aggr_cpu_id cpu_id = get_id(cpu, data);
271 for (int j = 0; j < c->nr; j++) {
272 if (aggr_cpu_id__equal(&cpu_id, &c->map[j])) {
278 c->map[c->nr] = cpu_id;
279 c->nr++;
283 if (c->nr != perf_cpu_map__nr(cpus)) {
286 sizeof(struct cpu_aggr_map) + sizeof(struct aggr_cpu_id) * c->nr);
294 qsort(c->map, c->nr, sizeof(struct aggr_cpu_id), aggr_cpu_id__cmp);
307 struct aggr_cpu_id aggr_cpu_id__die(struct perf_cpu cpu, void *data)
322 id = aggr_cpu_id__socket(cpu, data);
337 struct aggr_cpu_id aggr_cpu_id__cluster(struct perf_cpu cpu, void *data)
346 id = aggr_cpu_id__die(cpu, data);
360 struct aggr_cpu_id aggr_cpu_id__core(struct perf_cpu cpu, void *data)
366 id = aggr_cpu_id__cluster(cpu, data);
379 struct aggr_cpu_id aggr_cpu_id__cpu(struct perf_cpu cpu, void *data)
384 id = aggr_cpu_id__core(cpu, data);
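Lines 307-384 show each aggregation level being built on top of the one below it: cpu calls core, core calls cluster, cluster calls die, die calls socket, so an ID produced at a finer level already carries every coarser field. A tiny usage sketch (hypothetical helper):

	static struct aggr_cpu_id example_id_for_cpu0(void)
	{
		struct perf_cpu cpu = { .cpu = 0 };

		/* The returned ID also has core, cluster, die and socket filled in. */
		return aggr_cpu_id__cpu(cpu, /*data=*/NULL);
	}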
393 struct aggr_cpu_id aggr_cpu_id__node(struct perf_cpu cpu, void *data __maybe_unused)
401 struct aggr_cpu_id aggr_cpu_id__global(struct perf_cpu cpu, void *data __maybe_unused)
412 static int get_max_num(char *path, int *max)
418 if (filename__read_str(path, &buf, &num))
419 return -1;
424 while (--num) {
425 if ((buf[num] == ',') || (buf[num] == '-')) {
431 err = -1;
435 /* convert from 0-based to 1-based */
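get_max_num() walks the sysfs list backwards to the last ',' or '-' (line 425), parses the final number, and then converts it from a 0-based CPU id to a count (line 435). A rough standalone illustration, simplified to the dash-only case and using an invented buffer:

	#include <stdlib.h>
	#include <string.h>

	static int example_parse_possible(void)
	{
		const char *buf = "0-7";		/* e.g. contents of .../cpu/possible */
		int max = atoi(strrchr(buf, '-') + 1);	/* last field -> 7 */

		return max + 1;				/* 0-based -> 1-based, i.e. 8 CPUs */
	}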
447 char path[PATH_MAX];
448 int max, ret = -1;
459 ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt);
461 pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
465 ret = get_max_num(path, &max);
472 ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/present", mnt);
474 pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
478 ret = get_max_num(path, &max);
482 ret = -1;
495 char path[PATH_MAX];
496 int ret = -1;
506 ret = snprintf(path, PATH_MAX, "%s/devices/system/node/possible", mnt);
508 pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
512 ret = get_max_num(path, &max_node_num);
548 return -1;
564 return -1;
568 cpunode_map[i] = -1;
579 char path[PATH_MAX];
585 return -1;
591 n = snprintf(path, PATH_MAX, "%s/devices/system/node", mnt);
593 pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
594 return -1;
597 dir1 = opendir(path);
603 if (dent1->d_type != DT_DIR || sscanf(dent1->d_name, "node%u", &mem) < 1)
606 n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name);
608 pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
616 if (dent2->d_type != DT_LNK || sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
628 int i, start = -1;
641 if (start == -1) {
644 ret += snprintf(buf + ret, size - ret,
648 } else if (((i - start) != (cpu.cpu - perf_cpu_map__cpu(map, start).cpu)) || last) {
649 int end = i - 1;
652 ret += snprintf(buf + ret, size - ret,
656 ret += snprintf(buf + ret, size - ret,
657 "%s%d-%d", COMMA,
676 return val - 10 + 'a';
699 for (int cpu = last_cpu.cpu / 4 * 4; cpu >= 0; cpu -= 4) {
714 buf[size - 1] = '\0';
715 return ptr - buf;
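The mask printer works nibble by nibble from the highest CPU downwards (line 699 steps four CPUs at a time) using the hex-digit helper around line 676, so CPUs {0,1,2,3} print as "f" and {0,2} as "5". Usage mirrors cpu_map__snprint above (same assumptions):

	static void example_format_mask(struct perf_cpu_map *map, char *buf, size_t size)
	{
		/* CPUs {0,1,2,3} -> "f"; CPUs {0,2} -> "5". */
		cpu_map__snprint_mask(map, buf, size);
	}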
730 return a->thread_idx == b->thread_idx &&
731 a->node == b->node &&
732 a->socket == b->socket &&
733 a->die == b->die &&
734 a->cluster == b->cluster &&
735 a->cache_lvl == b->cache_lvl &&
736 a->cache == b->cache &&
737 a->core == b->core &&
738 a->cpu.cpu == b->cpu.cpu;
743 return a->thread_idx == -1 &&
744 a->node == -1 &&
745 a->socket == -1 &&
746 a->die == -1 &&
747 a->cluster == -1 &&
748 a->cache_lvl == -1 &&
749 a->cache == -1 &&
750 a->core == -1 &&
751 a->cpu.cpu == -1;
757 .thread_idx = -1,
758 .node = -1,
759 .socket = -1,
760 .die = -1,
761 .cluster = -1,
762 .cache_lvl = -1,
763 .cache = -1,
764 .core = -1,
765 .cpu = (struct perf_cpu){ .cpu = -1 },