| /linux/kernel/sched/ |
| H A D | topology.c | 361 static void perf_domain_debug(const struct cpumask *cpu_map, in perf_domain_debug() argument 367 printk(KERN_DEBUG "root_domain %*pbl:", cpumask_pr_args(cpu_map)); in perf_domain_debug() 409 static bool build_perf_domains(const struct cpumask *cpu_map) in build_perf_domains() argument 413 int cpu = cpumask_first(cpu_map); in build_perf_domains() 419 if (!sched_is_eas_possible(cpu_map)) in build_perf_domains() 422 for_each_cpu(i, cpu_map) { in build_perf_domains() 435 perf_domain_debug(cpu_map, pd); in build_perf_domains() 1396 const struct cpumask *cpu_map) in asym_cpu_capacity_classify() 1410 else if (cpumask_intersects(cpu_map, cpu_capacity_span(entry))) in asym_cpu_capacity_classify() 1534 static void __sdt_free(const struct cpumask *cpu_map); in __free_domain_allocs() 1391 asym_cpu_capacity_classify(const struct cpumask * sd_span,const struct cpumask * cpu_map) asym_cpu_capacity_classify() argument 1533 __free_domain_allocs(struct s_data * d,enum s_alloc what,const struct cpumask * cpu_map) __free_domain_allocs() argument 1552 __visit_domain_allocation_hell(struct s_data * d,const struct cpumask * cpu_map) __visit_domain_allocation_hell() argument 1633 sd_init(struct sched_domain_topology_level * tl,const struct cpumask * cpu_map,struct sched_domain * child,int cpu) sd_init() argument 2358 __sdt_alloc(const struct cpumask * cpu_map) __sdt_alloc() argument 2425 __sdt_free(const struct cpumask * cpu_map) __sdt_free() argument 2462 build_sched_domain(struct sched_domain_topology_level * tl,const struct cpumask * cpu_map,struct sched_domain_attr * attr,struct sched_domain * child,int cpu) build_sched_domain() argument 2493 topology_span_sane(const struct cpumask * cpu_map) topology_span_sane() argument 2551 build_sched_domains(const struct cpumask * cpu_map,struct sched_domain_attr * attr) build_sched_domains() argument 2756 sched_init_domains(const struct cpumask * cpu_map) sched_init_domains() argument 2780 detach_destroy_domains(const struct cpumask * cpu_map) detach_destroy_domains() argument [all...] |
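A hedged sketch (kernel context) of how the hits above walk a root-domain cpu_map: cpumask_first(), for_each_cpu() and the %*pbl/cpumask_pr_args() printk format are real kernel APIs; the helper name demo_walk_cpu_map is hypothetical.

#include <linux/cpumask.h>
#include <linux/printk.h>

static void demo_walk_cpu_map(const struct cpumask *cpu_map)
{
	int i, first = cpumask_first(cpu_map);	/* lowest CPU in the map */

	/* %*pbl prints the mask as a CPU list, e.g. "0-3,8" */
	printk(KERN_DEBUG "root_domain %*pbl:\n", cpumask_pr_args(cpu_map));

	for_each_cpu(i, cpu_map) {
		if (i == first)
			continue;	/* the anchor CPU was handled above */
		/* per-CPU work, e.g. attaching a perf domain, goes here */
	}
}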
| /linux/tools/power/x86/intel-speed-select/ |
| H A D | isst-config.c | 74 struct _cpu_map *cpu_map; variable 355 if (cpu_map && cpu_map[cpu].initialized) in get_physical_package_id() 356 return cpu_map[cpu].pkg_id; in get_physical_package_id() 379 if (cpu_map && cpu_map[cpu].initialized) in get_physical_core_id() 380 return cpu_map[cpu].core_id; in get_physical_core_id() 403 if (cpu_map && cpu_map[cpu].initialized) in get_physical_die_id() 404 return cpu_map[cpu].die_id; in get_physical_die_id() 722 update_punit_cpu_info(__u32 physical_cpu,struct _cpu_map * cpu_map) update_punit_cpu_info() argument [all...] |
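A hedged userspace sketch of the cache-or-fallback pattern in the get_physical_*_id() hits: consult the cached cpu_map entry first, otherwise read sysfs. Only the fields pkg_id/core_id/die_id/initialized appear in the hits; the struct layout and the sysfs fallback are illustrative assumptions.

#include <stdio.h>

struct _cpu_map {
	unsigned char initialized;
	int pkg_id;
	int core_id;
	int die_id;
};

static struct _cpu_map *cpu_map;	/* indexed by logical CPU */

static int get_pkg_id(int cpu)
{
	if (cpu_map && cpu_map[cpu].initialized)
		return cpu_map[cpu].pkg_id;	/* fast path: cached */

	/* slow path: read the id from sysfs */
	int id = -1;
	char path[128];
	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/topology/physical_package_id",
		 cpu);
	FILE *f = fopen(path, "r");
	if (f) {
		if (fscanf(f, "%d", &id) != 1)
			id = -1;
		fclose(f);
	}
	return id;
}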
| /linux/arch/mips/kernel/ |
| H A D | cacheinfo.c | 58 static void fill_cpumask_siblings(int cpu, cpumask_t *cpu_map) in fill_cpumask_siblings() argument 64 cpumask_set_cpu(cpu1, cpu_map); in fill_cpumask_siblings() 67 static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map) in fill_cpumask_cluster() argument 74 cpumask_set_cpu(cpu1, cpu_map); in fill_cpumask_cluster()
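A hedged kernel-context sketch of what fill_cpumask_siblings() does above: set every possible CPU that shares a package with @cpu in the output mask. Testing the package via topology_physical_package_id() is an assumption; the MIPS code derives siblings from its own per-arch state.

#include <linux/cpumask.h>
#include <linux/topology.h>

static void demo_fill_siblings(int cpu, cpumask_t *cpu_map)
{
	int cpu1;

	for_each_possible_cpu(cpu1) {
		if (topology_physical_package_id(cpu1) ==
		    topology_physical_package_id(cpu))
			cpumask_set_cpu(cpu1, cpu_map);	/* same package */
	}
}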
|
| /linux/tools/perf/arch/arm64/util/ |
| H A D | arm-spe.c | 83 struct perf_cpu_map *cpu_map = arm_spe_find_cpus(evlist); in arm_spe_info_priv_size() local 86 if (!cpu_map) in arm_spe_info_priv_size() 90 ARM_SPE_CPU_PRIV_MAX * perf_cpu_map__nr(cpu_map); in arm_spe_info_priv_size() 93 perf_cpu_map__put(cpu_map); in arm_spe_info_priv_size() 152 struct perf_cpu_map *cpu_map; in arm_spe_info_fill() local 162 cpu_map = arm_spe_find_cpus(session->evlist); in arm_spe_info_fill() 163 if (!cpu_map) in arm_spe_info_fill() 171 auxtrace_info->priv[ARM_SPE_CPUS_NUM] = perf_cpu_map__nr(cpu_map); in arm_spe_info_fill() 174 perf_cpu_map__for_each_cpu(cpu, i, cpu_map) { in arm_spe_info_fill() 185 perf_cpu_map__put(cpu_map); in arm_spe_info_fill()
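A hedged libperf sketch of the lifetime pattern in the arm-spe hits: obtain a CPU map, size per-CPU data from it, then drop the reference. perf_cpu_map__new(), perf_cpu_map__nr(), the perf_cpu_map__for_each_cpu() iterator and perf_cpu_map__put() mirror the calls shown; the "0-3" CPU list is arbitrary.

#include <perf/cpumap.h>
#include <stdio.h>

int main(void)
{
	struct perf_cpu_map *cpu_map = perf_cpu_map__new("0-3");
	struct perf_cpu cpu;
	int idx;

	if (!cpu_map)
		return 1;

	printf("%d cpus\n", perf_cpu_map__nr(cpu_map));
	perf_cpu_map__for_each_cpu(cpu, idx, cpu_map)
		printf("idx %d -> cpu %d\n", idx, cpu.cpu);

	perf_cpu_map__put(cpu_map);	/* drop our reference */
	return 0;
}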
|
| /linux/tools/testing/selftests/bpf/progs/ |
| H A D | freplace_progmap.c | 10 } cpu_map SEC(".maps"); 21 return bpf_redirect_map(&cpu_map, 0, XDP_PASS); in xdp_cpumap_prog()
|
| H A D | test_xdp_with_cpumap_frags_helpers.c | 13 } cpu_map SEC(".maps");
|
| H A D | xdp_features.c | 52 } cpu_map SEC(".maps"); 222 return bpf_redirect_map(&cpu_map, 0, 0); in xdp_do_redirect()
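A hedged sketch combining the selftest fragments above: a CPUMAP declared in the .maps section plus an XDP program that redirects to the entry at key 0, falling back to XDP_PASS. This mirrors freplace_progmap.c and xdp_features.c; max_entries of 4 is arbitrary.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_CPUMAP);
	__uint(max_entries, 4);
	__type(key, __u32);
	__type(value, struct bpf_cpumap_val);
} cpu_map SEC(".maps");

SEC("xdp")
int xdp_redir_cpu(struct xdp_md *ctx)
{
	/* redirect to the CPU stored at key 0; pass on lookup failure */
	return bpf_redirect_map(&cpu_map, 0, XDP_PASS);
}

char _license[] SEC("license") = "GPL";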
|
| /linux/Documentation/bpf/ |
| H A D | map_cpumap.rst | 103 ``cpu_map`` and how to redirect packets to a remote CPU using a round robin scheme. 112 } cpu_map SEC(".maps"); 153 return bpf_redirect_map(&cpu_map, cpu_dest, 0); 164 int set_max_cpu_entries(struct bpf_map *cpu_map) 166 if (bpf_map__set_max_entries(cpu_map, libbpf_num_possible_cpus()) < 0) { 167 fprintf(stderr, "Failed to set max entries for cpu_map map: %s",
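A hedged userspace sketch of the setup step the documentation describes: cap the map at the possible-CPU count before the object is loaded, then activate one CPU with a queue size. bpf_map__set_max_entries(), libbpf_num_possible_cpus() and bpf_map_update_elem() are real libbpf calls; the 2048-entry qsize is illustrative.

#include <bpf/libbpf.h>
#include <bpf/bpf.h>
#include <linux/bpf.h>

static int setup_cpu_map(struct bpf_map *cpu_map)
{
	/* must happen before bpf_object__load() */
	if (bpf_map__set_max_entries(cpu_map, libbpf_num_possible_cpus()) < 0)
		return -1;
	return 0;
}

static int enable_cpu(int map_fd, __u32 cpu)
{
	struct bpf_cpumap_val val = { .qsize = 2048 };

	/* a populated entry activates the cpumap kthread for this CPU */
	return bpf_map_update_elem(map_fd, &cpu, &val, BPF_ANY);
}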
|
| /linux/tools/perf/util/ |
| H A D | mmap.c | 248 struct perf_cpu_map *cpu_map = cpu_map__online(); in build_node_mask() local 250 if (!cpu_map) in build_node_mask() 253 nr_cpus = perf_cpu_map__nr(cpu_map); in build_node_mask() 255 cpu = perf_cpu_map__cpu(cpu_map, idx); /* map c index to online cpu index */ in build_node_mask() 259 perf_cpu_map__put(cpu_map); in build_node_mask()
|
| H A D | mem-events.c | 258 struct perf_cpu_map *cpu_map = NULL; in perf_mem_events__record_args() local 297 ret = perf_cpu_map__merge(&cpu_map, pmu->cpus); in perf_mem_events__record_args() 305 if (cpu_map) { in perf_mem_events__record_args() 308 if (!perf_cpu_map__equal(cpu_map, online)) { in perf_mem_events__record_args() 311 cpu_map__snprint(cpu_map, buf, sizeof(buf)); in perf_mem_events__record_args() 315 perf_cpu_map__put(cpu_map); in perf_mem_events__record_args()
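A hedged libperf sketch of the merge-then-compare logic in mem-events.c: accumulate each PMU's CPU map into one map, then test whether the union matches another map. perf_cpu_map__merge(&dst, src) and perf_cpu_map__equal() follow the calls shown in the hits; the two-map setup is illustrative.

#include <perf/cpumap.h>
#include <stdio.h>

int main(void)
{
	struct perf_cpu_map *merged = NULL;
	struct perf_cpu_map *a = perf_cpu_map__new("0-1");
	struct perf_cpu_map *b = perf_cpu_map__new("2-3");

	perf_cpu_map__merge(&merged, a);	/* merged = {0,1} */
	perf_cpu_map__merge(&merged, b);	/* merged = {0,1,2,3} */

	printf("equal: %d\n", perf_cpu_map__equal(a, b));	/* 0 */
	printf("merged nr: %d\n", perf_cpu_map__nr(merged));	/* 4 */

	perf_cpu_map__put(a);
	perf_cpu_map__put(b);
	perf_cpu_map__put(merged);
	return 0;
}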
|
| H A D | tool.h | 78 cpu_map, member
|
| H A D | tool.c | 307 tool->cpu_map = process_event_cpu_map_stub; in perf_tool__init() 407 CREATE_DELEGATE_OP2(cpu_map); 490 tool->tool.cpu_map = delegate_cpu_map; in delegate_tool__init()
|
| /linux/drivers/platform/x86/intel/speed_select_if/ |
| H A D | isst_if_common.c | 465 struct isst_if_cpu_map *cpu_map; in isst_if_proc_phyid_req() local 467 cpu_map = (struct isst_if_cpu_map *)cmd_ptr; in isst_if_proc_phyid_req() 468 if (cpu_map->logical_cpu >= nr_cpu_ids || in isst_if_proc_phyid_req() 469 cpu_map->logical_cpu >= num_possible_cpus()) in isst_if_proc_phyid_req() 473 cpu_map->physical_cpu = isst_cpu_info[cpu_map->logical_cpu].punit_cpu_id; in isst_if_proc_phyid_req() 597 cmd_cb.offset = offsetof(struct isst_if_cpu_maps, cpu_map); in isst_if_def_ioctl()
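A hedged userspace sketch of the request that isst_if_proc_phyid_req() services: ask the driver to translate a logical CPU to its punit physical ID. The uapi types come from isst_if.h (see the last entry in this listing); /dev/isst_interface is the character device the intel-speed-select tool opens.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/isst_if.h>

int main(void)
{
	struct isst_if_cpu_maps maps = {
		.cmd_count = 1,
		.cpu_map = { { .logical_cpu = 0 } },
	};
	int fd = open("/dev/isst_interface", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, ISST_IF_GET_PHY_ID, &maps) == 0)
		printf("cpu0 -> punit cpu %u\n", maps.cpu_map[0].physical_cpu);
	close(fd);
	return 0;
}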
|
| /linux/drivers/base/ |
| H A D | cacheinfo.c | 996 cpumask_t *cpu_map) in update_per_cpu_data_slice_size() argument 1000 for_each_cpu(icpu, cpu_map) { in update_per_cpu_data_slice_size() 1011 cpumask_t *cpu_map; in cacheinfo_cpu_online() local 1018 if (cpu_map_shared_cache(true, cpu, &cpu_map)) in cacheinfo_cpu_online() 1019 update_per_cpu_data_slice_size(true, cpu, cpu_map); in cacheinfo_cpu_online() 1028 cpumask_t *cpu_map; in cacheinfo_cpu_pre_down() local 1031 nr_shared = cpu_map_shared_cache(false, cpu, &cpu_map); in cacheinfo_cpu_pre_down() 1037 update_per_cpu_data_slice_size(false, cpu, cpu_map); in cacheinfo_cpu_pre_down()
|
| /linux/kernel/bpf/ |
| H A D | cpumap.c | 80 struct bpf_cpu_map_entry __rcu **cpu_map; member 106 cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries * in cpu_map_alloc() 109 if (!cmap->cpu_map) { in cpu_map_alloc() 555 old_rcpu = unrcu_pointer(xchg(&cmap->cpu_map[key_cpu], RCU_INITIALIZER(rcpu))); in __cpu_map_entry_replace() 623 * bpf_cpu_map->cpu_map, but also ensure pending flush operations in cpu_map_free() 634 rcpu = rcu_dereference_raw(cmap->cpu_map[i]); in cpu_map_free() 641 bpf_map_area_free(cmap->cpu_map); 657 rcpu = rcu_dereference_check(cmap->cpu_map[key], in cpu_map_lookup_elem()
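A hedged kernel-context sketch of the RCU-managed pointer array cpumap.c builds: publish a new entry with xchg() so readers under rcu_read_lock() see either the old or the new pointer, never a torn one. struct demo_entry and demo_replace() are hypothetical stand-ins for bpf_cpu_map_entry and __cpu_map_entry_replace().

#include <linux/rcupdate.h>
#include <linux/atomic.h>
#include <linux/types.h>

struct demo_entry { int qsize; };

static struct demo_entry __rcu **slots;	/* like cmap->cpu_map */

static struct demo_entry *demo_replace(u32 key, struct demo_entry *new)
{
	/* atomically swap in the new entry, return the old one for cleanup */
	return unrcu_pointer(xchg(&slots[key], RCU_INITIALIZER(new)));
}

static int demo_read_qsize(u32 key)
{
	struct demo_entry *e;
	int qsize = -1;

	rcu_read_lock();
	e = rcu_dereference(slots[key]);
	if (e)
		qsize = e->qsize;
	rcu_read_unlock();
	return qsize;
}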
|
| /linux/tools/testing/selftests/bpf/prog_tests/ |
| H A D | xdp_cpumap_attach.c | 41 map_fd = bpf_map__fd(skel->maps.cpu_map); in test_xdp_with_cpumap_helpers() 130 map_fd = bpf_map__fd(skel->maps.cpu_map); in test_xdp_with_cpumap_frags_helpers()
|
| /linux/mm/ |
| H A D | percpu.c | 2404 * @nr_units units. The returned ai's groups[0].cpu_map points to the in pcpu_alloc_alloc_info() 2405 * cpu_map array which is long enough for @nr_units and filled with in pcpu_alloc_alloc_info() 2406 * NR_CPUS. It's the caller's responsibility to initialize cpu_map in pcpu_alloc_alloc_info() 2422 __alignof__(ai->groups[0].cpu_map[0])); in pcpu_alloc_alloc_info() 2423 ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]); in pcpu_alloc_alloc_info() 2431 ai->groups[0].cpu_map = ptr; 2434 ai->groups[0].cpu_map[unit] = NR_CPUS; 2500 if (gi->cpu_map[unit] != NR_CPUS) 2502 cpu_width, gi->cpu_map[unit]); 2632 cpu = gi->cpu_map[unit]; in pcpu_setup_first_chunk() 2807 unsigned int *cpu_map; pcpu_build_alloc_info() local [all...] |
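A hedged userspace sketch of the allocation trick in pcpu_alloc_alloc_info(): one block holds the struct plus its trailing cpu_map array, with the pointer fixed up after the allocation and every slot preset to a sentinel. The struct names and the local NR_CPUS stand-in are illustrative.

#include <stdlib.h>
#include <stddef.h>

#define NR_CPUS 64	/* sentinel meaning "unit unused" */

struct group_info { unsigned int *cpu_map; };
struct alloc_info { int nr_groups; struct group_info groups[1]; };

static struct alloc_info *alloc_info_new(int nr_units)
{
	/* round the struct size up so the trailing array is aligned */
	size_t base = (sizeof(struct alloc_info) + _Alignof(unsigned int) - 1)
		      & ~(_Alignof(unsigned int) - 1);
	void *ptr = calloc(1, base + nr_units * sizeof(unsigned int));
	struct alloc_info *ai = ptr;

	if (!ai)
		return NULL;
	ai->groups[0].cpu_map = (unsigned int *)((char *)ptr + base);
	for (int unit = 0; unit < nr_units; unit++)
		ai->groups[0].cpu_map[unit] = NR_CPUS;	/* caller fills these */
	return ai;
}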
| /linux/tools/perf/python/ |
| H A D | twatch.py | 12 cpus = perf.cpu_map()
|
| H A D | tracepoint.py | 17 cpus = perf.cpu_map()
|
| /linux/tools/perf/arch/arm/util/ |
| H A D | cs-etm.c | 781 struct perf_cpu_map *cpu_map; in cs_etm_info_fill() local 797 cpu_map = online_cpus; in cs_etm_info_fill() 805 cpu_map = event_cpus; in cs_etm_info_fill() 808 nr_cpu = perf_cpu_map__nr(cpu_map); in cs_etm_info_fill() 821 perf_cpu_map__for_each_cpu(cpu, i, cpu_map) { in cs_etm_info_fill()
|
| /linux/include/linux/ |
| H A D | percpu.h | 81 unsigned int *cpu_map; /* unit->cpu map, empty entries contain NR_CPUS */ member
|
| /linux/drivers/gpu/drm/imagination/ |
| H A D | pvr_queue.c | 1242 void *cpu_map; in pvr_queue_create() local 1301 cpu_map = pvr_fw_object_create_and_map(pvr_dev, sizeof(*queue->timeline_ufo.value), in pvr_queue_create() 1304 if (IS_ERR(cpu_map)) { in pvr_queue_create() 1305 err = PTR_ERR(cpu_map); in pvr_queue_create() 1309 queue->timeline_ufo.value = cpu_map; in pvr_queue_create()
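A hedged kernel-context sketch of the error handling in pvr_queue_create(): mapping helpers in this style return either a valid pointer or an encoded errno, so the caller tests IS_ERR() and extracts the code with PTR_ERR(). demo_create_and_map() is a hypothetical stand-in for pvr_fw_object_create_and_map().

#include <linux/err.h>
#include <linux/slab.h>

static void *demo_create_and_map(size_t size)
{
	void *cpu_map = kzalloc(size, GFP_KERNEL);

	return cpu_map ? cpu_map : ERR_PTR(-ENOMEM);
}

static int demo_setup(void)
{
	void *cpu_map = demo_create_and_map(64);

	if (IS_ERR(cpu_map))
		return PTR_ERR(cpu_map);	/* propagate the encoded errno */

	/* use cpu_map ... */
	kfree(cpu_map);
	return 0;
}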
|
| /linux/tools/perf/ |
| H A D | builtin-stat.c | 1295 struct perf_cpu_map *cpu_map = perf_cpu_map__new(map); in cpu__get_cache_id_from_map() local 1302 id = perf_cpu_map__min(cpu_map).cpu; in cpu__get_cache_id_from_map() 1307 perf_cpu_map__put(cpu_map); in cpu__get_cache_id_from_map() 1685 struct perf_cpu_map *cpu_map; in perf_env__get_cache_id_for_cpu() local 1696 cpu_map = perf_cpu_map__new(caches[i].map); in perf_env__get_cache_id_for_cpu() 1697 map_contains_cpu = perf_cpu_map__idx(cpu_map, cpu); in perf_env__get_cache_id_for_cpu() 1698 perf_cpu_map__put(cpu_map); in perf_env__get_cache_id_for_cpu() 2308 cpus = cpu_map__new_data(&event->cpu_map.data); in process_cpu_map_event() 2368 perf_stat.tool.cpu_map = process_cpu_map_event; in __cmd_report()
|
| /linux/tools/perf/tests/ |
| H A D | tests.h | 150 DECLARE_SUITE(cpu_map);
|
| /linux/include/uapi/linux/ |
| H A D | isst_if.h | 63 struct isst_if_cpu_map cpu_map[1]; member
|