Lines Matching +full:cpu +full:- +full:map

1 // SPDX-License-Identifier: GPL-2.0-only
18 void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus)
20 RC_CHK_ACCESS(map)->nr = nr_cpus;
33 cpus->nr = nr_cpus;
34 refcount_set(&cpus->refcnt, 1);
44 RC_CHK_ACCESS(cpus)->map[0].cpu = -1;
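
The fragments above cover construction: the allocator starts the refcount at 1, and the single-entry "any CPU" map is seeded with the -1 sentinel. A minimal sketch of creating maps through libperf's public header (perf/cpumap.h), assuming libperf is installed and linked with -lperf; the printed counts depend on the running system:

	#include <stdio.h>
	#include <perf/cpumap.h>

	int main(void)
	{
		/* One entry whose cpu field is the -1 "any CPU" sentinel. */
		struct perf_cpu_map *any = perf_cpu_map__new_any_cpu();
		/* Read from sysfs ("devices/system/cpu/online"), sysconf as fallback. */
		struct perf_cpu_map *online = perf_cpu_map__new_online_cpus();

		if (!any || !online)
			return 1;

		printf("any: nr=%d first=%d\n",
		       perf_cpu_map__nr(any), perf_cpu_map__cpu(any, 0).cpu);
		printf("online: nr=%d\n", perf_cpu_map__nr(online));

		perf_cpu_map__put(any);
		perf_cpu_map__put(online);
		return 0;
	}
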
49 static void cpu_map__delete(struct perf_cpu_map *map)
51 if (map) {
52 WARN_ONCE(refcount_read(perf_cpu_map__refcnt(map)) != 0,
54 RC_CHK_FREE(map);
58 struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map)
62 if (RC_CHK_GET(result, map))
63 refcount_inc(perf_cpu_map__refcnt(map));
68 void perf_cpu_map__put(struct perf_cpu_map *map)
70 if (map) {
71 if (refcount_dec_and_test(perf_cpu_map__refcnt(map)))
72 cpu_map__delete(map);
74 RC_CHK_PUT(map);
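
Lines 58-74 are the standard get/put reference-counting pair: get bumps the count (with RC_CHK bookkeeping for the reference-count-checking build), and put frees the map once the count reaches zero. A sketch of the intended lifecycle, using a hypothetical consumer struct purely for illustration:

	#include <perf/cpumap.h>

	/* Hypothetical consumer that holds its own reference to a map. */
	struct consumer {
		struct perf_cpu_map *cpus;
	};

	static void consumer__init(struct consumer *c, struct perf_cpu_map *cpus)
	{
		/* Take an extra reference; the caller's reference stays valid. */
		c->cpus = perf_cpu_map__get(cpus);
	}

	static void consumer__exit(struct consumer *c)
	{
		/* Drop our reference; the last put frees the map. */
		perf_cpu_map__put(c->cpus);
		c->cpus = NULL;
	}
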
89 pr_warning("Number of online CPUs (%d) differs from the number configured (%d) the CPU map will only cover the first %d CPUs.",
98 RC_CHK_ACCESS(cpus)->map[i].cpu = i;
110 if (sysfs__read_str("devices/system/cpu/online", &buf, &buf_len) >= 0) {
132 return cpu_a->cpu - cpu_b->cpu;
137 return RC_CHK_ACCESS(cpus)->map[idx];
147 memcpy(RC_CHK_ACCESS(cpus)->map, tmp_cpus, payload_size);
148 qsort(RC_CHK_ACCESS(cpus)->map, nr_cpus, sizeof(struct perf_cpu), cmp_cpu);
153 __perf_cpu_map__cpu(cpus, i).cpu !=
154 __perf_cpu_map__cpu(cpus, i - 1).cpu) {
155 RC_CHK_ACCESS(cpus)->map[j++].cpu =
156 __perf_cpu_map__cpu(cpus, i).cpu;
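
Lines 147-156 are the usual sort-then-compact idiom: qsort() orders the copied entries, then one pass keeps each value that differs from its predecessor. The same pattern over a plain int array, as a standalone sketch independent of libperf's RC_CHK internals:

	#include <stdlib.h>

	static int cmp_int(const void *a, const void *b)
	{
		/* Safe for small non-negative ids; would overflow for extreme ints. */
		return *(const int *)a - *(const int *)b;
	}

	/* Sort values and squeeze out duplicates; returns the new length. */
	static int sort_and_dedup(int *vals, int n)
	{
		int i, j;

		if (n == 0)
			return 0;

		qsort(vals, n, sizeof(int), cmp_int);
		for (i = 1, j = 0; i < n; i++) {
			if (vals[i] != vals[j])
				vals[++j] = vals[i];
		}
		return j + 1;
	}
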
179 * TOPOLOGY header for NUMA nodes with no CPU
180 * (e.g., because of CPU hotplug)
189 || (*p != '\0' && *p != ',' && *p != '-' && *p != '\n'))
192 if (*p == '-') {
212 if (tmp_cpus[i].cpu == (int)start_cpu)
216 max_entries += max(end_cpu - start_cpu + 1, 16UL);
222 tmp_cpus[nr_cpus++].cpu = (int)start_cpu;
233 pr_warning("Unexpected characters at end of cpu list ('%s'), using online CPUs.",
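
The parser fragments above accept comma-separated entries and dash ranges, grow the temporary buffer in chunks of at least 16 entries, skip duplicate CPUs, and fall back to the online-CPU map when trailing garbage is found. A hedged usage sketch; the list "0-2,7" is illustrative:

	#include <stdio.h>
	#include <perf/cpumap.h>

	int main(void)
	{
		/* "0-2,7" expands to CPUs 0, 1, 2 and 7, sorted and deduplicated. */
		struct perf_cpu_map *cpus = perf_cpu_map__new("0-2,7");
		struct perf_cpu cpu;
		int idx;

		if (!cpus)
			return 1;

		perf_cpu_map__for_each_cpu(cpu, idx, cpus)
			printf("idx %d -> cpu %d\n", idx, cpu.cpu);

		perf_cpu_map__put(cpus);
		return 0;
	}
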
247 return RC_CHK_ACCESS(cpus)->nr;
253 .cpu = -1
267 bool perf_cpu_map__has_any_cpu_or_is_empty(const struct perf_cpu_map *map)
269 return map ? __perf_cpu_map__cpu(map, 0).cpu == -1 : true;
272 bool perf_cpu_map__is_any_cpu_or_is_empty(const struct perf_cpu_map *map)
274 if (!map)
277 return __perf_cpu_map__nr(map) == 1 && __perf_cpu_map__cpu(map, 0).cpu == -1;
280 bool perf_cpu_map__is_empty(const struct perf_cpu_map *map)
282 return map == NULL;
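
The three predicates at lines 267-282 answer subtly different questions: has_any_cpu_or_is_empty is true for a NULL map or one whose first entry is the -1 "any CPU" sentinel; is_any_cpu_or_is_empty additionally requires that the sentinel be the only entry; is_empty is true only for NULL. A sketch showing how they diverge across the three kinds of maps:

	#include <stdio.h>
	#include <perf/cpumap.h>

	static void report(const char *name, const struct perf_cpu_map *map)
	{
		printf("%s: has_any_or_empty=%d is_any_or_empty=%d is_empty=%d\n",
		       name,
		       perf_cpu_map__has_any_cpu_or_is_empty(map),
		       perf_cpu_map__is_any_cpu_or_is_empty(map),
		       perf_cpu_map__is_empty(map));
	}

	int main(void)
	{
		struct perf_cpu_map *any = perf_cpu_map__new_any_cpu();
		struct perf_cpu_map *cpu0 = perf_cpu_map__new("0");

		report("NULL", NULL);	/* 1 1 1 */
		report("any", any);	/* 1 1 0 */
		report("cpu0", cpu0);	/* 0 0 0 */

		perf_cpu_map__put(any);
		perf_cpu_map__put(cpu0);
		return 0;
	}
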
285 int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
290 return -1;
298 if (cpu_at_idx.cpu == cpu.cpu)
301 if (cpu_at_idx.cpu > cpu.cpu)
307 return -1;
310 bool perf_cpu_map__has(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
312 return perf_cpu_map__idx(cpus, cpu) != -1;
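
Because the map is kept sorted, the comparisons at lines 298-301 are the core of a binary search: equality returns the index, and the greater-than test narrows the interval, with -1 on a miss; perf_cpu_map__has() is a thin wrapper over it. Usage sketch with illustrative CPU numbers:

	#include <stdio.h>
	#include <perf/cpumap.h>

	int main(void)
	{
		struct perf_cpu_map *cpus = perf_cpu_map__new("1,3,5");
		struct perf_cpu three = { .cpu = 3 };
		struct perf_cpu four = { .cpu = 4 };

		if (!cpus)
			return 1;

		printf("idx of cpu 3: %d\n", perf_cpu_map__idx(cpus, three)); /* 1 */
		printf("has cpu 4: %d\n", perf_cpu_map__has(cpus, four));     /* 0 */

		perf_cpu_map__put(cpus);
		return 0;
	}
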
330 if (__perf_cpu_map__cpu(lhs, idx).cpu != __perf_cpu_map__cpu(rhs, idx).cpu)
336 bool perf_cpu_map__has_any_cpu(const struct perf_cpu_map *map)
338 return map && __perf_cpu_map__cpu(map, 0).cpu == -1;
341 struct perf_cpu perf_cpu_map__min(const struct perf_cpu_map *map)
343 struct perf_cpu cpu, result = {
344 .cpu = -1
348 perf_cpu_map__for_each_cpu_skip_any(cpu, idx, map) {
349 result = cpu;
355 struct perf_cpu perf_cpu_map__max(const struct perf_cpu_map *map)
358 .cpu = -1
362 return __perf_cpu_map__nr(map) > 0
363 ? __perf_cpu_map__cpu(map, __perf_cpu_map__nr(map) - 1)
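
perf_cpu_map__min() skips a leading -1 "any CPU" entry and returns the first real CPU, while perf_cpu_map__max() simply returns the last entry of the sorted map (or -1 for an empty one). A short sketch; the list "2,4-6" is illustrative:

	#include <stdio.h>
	#include <perf/cpumap.h>

	int main(void)
	{
		struct perf_cpu_map *cpus = perf_cpu_map__new("2,4-6");

		if (!cpus)
			return 1;

		/* Prints min=2 max=6: first real entry and last entry. */
		printf("min=%d max=%d\n",
		       perf_cpu_map__min(cpus).cpu,
		       perf_cpu_map__max(cpus).cpu);

		perf_cpu_map__put(cpus);
		return 0;
	}
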
376 if (__perf_cpu_map__cpu(a, i).cpu > __perf_cpu_map__cpu(b, j).cpu)
378 if (__perf_cpu_map__cpu(a, i).cpu == __perf_cpu_map__cpu(b, j).cpu) {
396 * Otherwise, '*orig' gets freed and replaced with a new map.
416 return -ENOMEM;
421 if (__perf_cpu_map__cpu(*orig, i).cpu <= __perf_cpu_map__cpu(other, j).cpu) {
422 if (__perf_cpu_map__cpu(*orig, i).cpu == __perf_cpu_map__cpu(other, j).cpu)
463 if (__perf_cpu_map__cpu(orig, i).cpu < __perf_cpu_map__cpu(other, j).cpu)
465 else if (__perf_cpu_map__cpu(orig, i).cpu > __perf_cpu_map__cpu(other, j).cpu)
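
The final fragments implement the two set operations: perf_cpu_map__merge() computes the sorted union, freeing and replacing '*orig' unless one side already contains the other, and perf_cpu_map__intersect() walks both sorted maps to build a new map of the common entries. A usage sketch with illustrative ranges:

	#include <stdio.h>
	#include <perf/cpumap.h>

	int main(void)
	{
		struct perf_cpu_map *a = perf_cpu_map__new("0-3");
		struct perf_cpu_map *b = perf_cpu_map__new("2-5");
		struct perf_cpu_map *both;

		if (!a || !b)
			return 1;

		both = perf_cpu_map__intersect(a, b);	/* CPUs 2,3 */
		if (!both || perf_cpu_map__merge(&a, b))
			return 1;			/* 'a' is now 0-5 */

		printf("merged nr=%d intersect nr=%d\n",
		       perf_cpu_map__nr(a), perf_cpu_map__nr(both));

		perf_cpu_map__put(a);
		perf_cpu_map__put(b);
		perf_cpu_map__put(both);
		return 0;
	}
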