
Searched +full:cpu +full:- +full:map (Results 1 – 25 of 1111) sorted by relevance


/linux/tools/perf/tests/
topology.c
1 // SPDX-License-Identifier: GPL-2.0
14 #define TEMPL "/tmp/perf-test-XXXXXX"
26 return -1; in get_temp()
44 session->evlist = evlist__new_default(); in session_write_header()
45 TEST_ASSERT_VAL("can't get evlist", session->evlist); in session_write_header()
46 session->evlist->session = session; in session_write_header()
48 perf_header__set_feat(&session->header, HEADER_CPU_TOPOLOGY); in session_write_header()
49 perf_header__set_feat(&session->header, HEADER_NRCPUS); in session_write_header()
50 perf_header__set_feat(&session->header, HEADER_ARCH); in session_write_header()
52 session->header.data_size += DATA_SIZE; in session_write_header()
[all …]
mem2node.c
1 // SPDX-License-Identifier: GPL-2.0
15 const char *map; member
17 { .node = 0, .map = "0" },
18 { .node = 1, .map = "1-2" },
19 { .node = 3, .map = "5-7,9" },
26 struct perf_cpu_map *map = perf_cpu_map__new(str); in get_bitmap() local
31 if (map && bm) { in get_bitmap()
32 struct perf_cpu cpu; in get_bitmap() local
35 perf_cpu_map__for_each_cpu(cpu, i, map) in get_bitmap()
36 __set_bit(cpu.cpu, bm); in get_bitmap()
[all …]
/linux/tools/lib/perf/
cpumap.c
1 // SPDX-License-Identifier: GPL-2.0-only
18 void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus) in perf_cpu_map__set_nr() argument
20 RC_CHK_ACCESS(map)->nr = nr_cpus; in perf_cpu_map__set_nr()
33 cpus->nr = nr_cpus; in perf_cpu_map__alloc()
34 refcount_set(&cpus->refcnt, 1); in perf_cpu_map__alloc()
44 RC_CHK_ACCESS(cpus)->map[0].cpu = -1; in perf_cpu_map__new_any_cpu()
49 static void cpu_map__delete(struct perf_cpu_map *map) in cpu_map__delete() argument
51 if (map) { in cpu_map__delete()
52 WARN_ONCE(refcount_read(perf_cpu_map__refcnt(map)) != 0, in cpu_map__delete()
54 RC_CHK_FREE(map); in cpu_map__delete()
[all …]
evlist.c
1 // SPDX-License-Identifier: GPL-2.0
30 INIT_LIST_HEAD(&evlist->entries); in perf_evlist__init()
31 evlist->nr_entries = 0; in perf_evlist__init()
32 fdarray__init(&evlist->pollfd, 64); in perf_evlist__init()
39 if (perf_cpu_map__is_empty(evsel->cpus)) { in __perf_evlist__propagate_maps()
40 if (perf_cpu_map__is_empty(evsel->pmu_cpus)) { in __perf_evlist__propagate_maps()
42 * Assume the unset PMU cpus were for a system-wide in __perf_evlist__propagate_maps()
45 evsel->pmu_cpus = perf_cpu_map__new_online_cpus(); in __perf_evlist__propagate_maps()
47 if (evlist->has_user_cpus && !evsel->system_wide) { in __perf_evlist__propagate_maps()
52 evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus); in __perf_evlist__propagate_maps()
[all …]
/linux/tools/lib/perf/include/perf/
cpumap.h
1 /* SPDX-License-Identifier: GPL-2.0 */
9 /** A wrapper around a CPU to avoid confusion with the perf_cpu_map's map's indices. */
11 int16_t cpu; member
22 * perf_cpu_map__new_any_cpu - a map with a singular "any CPU"/dummy -1 value.
26 * perf_cpu_map__new_online_cpus - a map read from
27 * /sys/devices/system/cpu/online if
28 * available. If reading wasn't possible a map
35 * perf_cpu_map__new - create a map from the given cpu_list such as "0-7". If no
40 /** perf_cpu_map__new_int - create a map with the one given cpu. */
41 LIBPERF_API struct perf_cpu_map *perf_cpu_map__new_int(int cpu);
[all …]
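
The comments above cover the main libperf constructors. As a quick illustration, here is a minimal user-space sketch (an assumption, not part of the tree) that parses the "0-7" list form mentioned for perf_cpu_map__new(), walks the result with the header's iteration macro, and drops the reference; it presumes the libperf headers are installed and the binary is linked with -lperf.

    #include <stdio.h>
    #include <perf/cpumap.h>

    int main(void)
    {
        struct perf_cpu_map *map = perf_cpu_map__new("0-7");  /* parse the CPU list */
        struct perf_cpu cpu;
        int idx;

        if (!map)
            return 1;

        /* idx walks the map's indices, cpu is the wrapped CPU number. */
        perf_cpu_map__for_each_cpu(cpu, idx, map)
            printf("index %d -> cpu %d\n", idx, cpu.cpu);

        perf_cpu_map__put(map);  /* release the reference taken by __new() */
        return 0;
    }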
/linux/tools/perf/util/
cpumap.c
1 // SPDX-License-Identifier: GPL-2.0
22 * CPU number.
34 return (data->mask32_data.long_size == 4) in perf_record_cpu_map_data__test_bit()
35 ? (bit_word32 < data->mask32_data.nr) && in perf_record_cpu_map_data__test_bit()
36 (data->mask32_data.mask[bit_word32] & bit_mask32) != 0 in perf_record_cpu_map_data__test_bit()
37 : (bit_word64 < data->mask64_data.nr) && in perf_record_cpu_map_data__test_bit()
38 (data->mask64_data.mask[bit_word64] & bit_mask64) != 0; in perf_record_cpu_map_data__test_bit()
41 /* Read ith mask value from data into the given 64-bi
67 struct perf_cpu_map *map; cpu_map__from_entries() local
93 struct perf_cpu_map *map; cpu_map__from_mask() local
106 int cpu; cpu_map__from_mask() local
118 struct perf_cpu_map *map; cpu_map__from_range() local
129 for (int cpu = data->range_cpu_data.start_cpu; cpu <= data->range_cpu_data.end_cpu; cpu_map__from_range() local
151 cpu_map__fprintf(struct perf_cpu_map * map,FILE * fp) cpu_map__fprintf() argument
188 cpu__get_topology_int(int cpu,const char * name,int * value) cpu__get_topology_int() argument
198 cpu__get_socket_id(struct perf_cpu cpu) cpu__get_socket_id() argument
204 aggr_cpu_id__socket(struct perf_cpu cpu,void * data __maybe_unused) aggr_cpu_id__socket() argument
240 struct perf_cpu cpu; cpu_aggr_map__new() local
282 cpu__get_die_id(struct perf_cpu cpu) cpu__get_die_id() argument
289 aggr_cpu_id__die(struct perf_cpu cpu,void * data) aggr_cpu_id__die() argument
312 cpu__get_cluster_id(struct perf_cpu cpu) cpu__get_cluster_id() argument
319 aggr_cpu_id__cluster(struct perf_cpu cpu,void * data) aggr_cpu_id__cluster() argument
336 cpu__get_core_id(struct perf_cpu cpu) cpu__get_core_id() argument
342 aggr_cpu_id__core(struct perf_cpu cpu,void * data) aggr_cpu_id__core() argument
361 aggr_cpu_id__cpu(struct perf_cpu cpu,void * data) aggr_cpu_id__cpu() argument
375 aggr_cpu_id__node(struct perf_cpu cpu,void * data __maybe_unused) aggr_cpu_id__node() argument
383 aggr_cpu_id__global(struct perf_cpu cpu,void * data __maybe_unused) aggr_cpu_id__global() argument
518 cpu__get_node(struct perf_cpu cpu) cpu__get_node() argument
551 unsigned int cpu, mem; cpu__setup_cpunode_map() local
600 cpu_map__snprint(struct perf_cpu_map * map,char * buf,size_t size) cpu_map__snprint() argument
609 struct perf_cpu cpu = { .cpu = INT_MAX }; cpu_map__snprint() local
654 cpu_map__snprint_mask(struct perf_cpu_map * map,char * buf,size_t size) cpu_map__snprint_mask() argument
673 for (int cpu = last_cpu.cpu / 4 * 4; cpu >= 0; cpu -= 4) { cpu_map__snprint_mask() local
[all...]
mmap.c
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
5 * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
34 len = bitmap_scnprintf(mask->bits, mask->nbits, buf, MASK_SIZE); in mmap_cpu_mask__scnprintf()
36 pr_debug("%p: %s mask[%zd]: %s\n", mask, tag, mask->nbits, buf); in mmap_cpu_mask__scnprintf()
39 size_t mmap__mmap_len(struct mmap *map) in mmap__mmap_len() argument
71 perf_mmap__aio_enabled(struct mmap * map) perf_mmap__aio_enabled() argument
77 perf_mmap__aio_alloc(struct mmap * map,int idx) perf_mmap__aio_alloc() argument
89 perf_mmap__aio_free(struct mmap * map,int idx) perf_mmap__aio_free() argument
97 perf_mmap__aio_bind(struct mmap * map,int idx,struct perf_cpu cpu,int affinity) perf_mmap__aio_bind() argument
126 perf_mmap__aio_alloc(struct mmap * map,int idx) perf_mmap__aio_alloc() argument
135 perf_mmap__aio_free(struct mmap * map,int idx) perf_mmap__aio_free() argument
147 perf_mmap__aio_mmap(struct mmap * map,struct mmap_params * mp) perf_mmap__aio_mmap() argument
201 perf_mmap__aio_munmap(struct mmap * map) perf_mmap__aio_munmap() argument
229 mmap__munmap(struct mmap * map) mmap__munmap() argument
246 struct perf_cpu cpu; build_node_mask() local
261 perf_mmap__setup_affinity_mask(struct mmap * map,struct mmap_params * mp) perf_mmap__setup_affinity_mask() argument
276 mmap__mmap(struct mmap * map,struct mmap_params * mp,int fd,struct perf_cpu cpu) mmap__mmap() argument
320 perf_mmap__push(struct mmap * md,void * to,int push (struct mmap * map,void * to,void * buf,size_t size)) perf_mmap__push() argument
[all...]
/linux/kernel/bpf/
cpumap.c
1 // SPDX-License-Identifier: GPL-2.0-only
8 * DOC: cpu map
9 * The 'cpumap' is primarily used as a backend map for XDP BPF helper
13 * this map type redirects raw XDP frames to another CPU. The remote
14 * CPU will do SKB-allocation and call the normal network stack.
20 * basically allows for 10G wirespeed pre-filtering via bpf.
39 /* General idea: XDP packets getting XDP redirected to another CPU,
40 * will maximum be stored/queued for one driver ->poll() call. It is
42 * same CPU. Thus, cpu_map_flush operation can deduct via this_cpu_ptr()
46 #define CPU_MAP_BULK_SIZE 8 /* 8 == one cacheline on 64-bit archs */
[all …]
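
The DOC block above describes the redirect path at a high level. As a hedged sketch (the map name, size, and target CPU are illustrative assumptions, and user space must still populate the chosen entry with a struct bpf_cpumap_val before redirects succeed), an XDP program would typically use such a map like this:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
        __uint(type, BPF_MAP_TYPE_CPUMAP);
        __uint(max_entries, 64);            /* one slot per possible target CPU */
        __type(key, __u32);
        __type(value, struct bpf_cpumap_val);
    } cpu_map SEC(".maps");

    SEC("xdp")
    int redirect_to_cpu(struct xdp_md *ctx)
    {
        __u32 target = 2;                   /* illustrative target CPU index */

        /* Fall back to XDP_PASS if the entry is empty or the redirect fails. */
        return bpf_redirect_map(&cpu_map, target, XDP_PASS);
    }

    char _license[] SEC("license") = "GPL";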
hashtab.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
48 * from sys_bpf(). BPF recursion is prevented by incrementing the per CPU
52 * by pinning the task to the current CPU and incrementing the recursion
53 * protection across the map operation.
74 * it is only safe to use raw spinlock for preallocated hash map on a RT kernel,
76 * after hash map wa
89 struct bpf_map map; global() member
199 fd_htab_map_get_ptr(const struct bpf_map * map,struct htab_elem * l) fd_htab_map_get_ptr() argument
255 int cpu; htab_free_prealloced_fields() local
389 int cpu; alloc_extra_elems() local
657 __htab_map_lookup_elem(struct bpf_map * map,void * key) __htab_map_lookup_elem() argument
678 htab_map_lookup_elem(struct bpf_map * map,void * key) htab_map_lookup_elem() argument
699 htab_map_gen_lookup(struct bpf_map * map,struct bpf_insn * insn_buf) htab_map_gen_lookup() argument
714 __htab_lru_map_lookup_elem(struct bpf_map * map,void * key,const bool mark) __htab_lru_map_lookup_elem() argument
728 htab_lru_map_lookup_elem(struct bpf_map * map,void * key) htab_lru_map_lookup_elem() argument
733 htab_lru_map_lookup_elem_sys(struct bpf_map * map,void * key) htab_lru_map_lookup_elem_sys() argument
738 htab_lru_map_gen_lookup(struct bpf_map * map,struct bpf_insn * insn_buf) htab_lru_map_gen_lookup() argument
771 int cpu; check_and_free_fields() local
818 htab_map_get_next_key(struct bpf_map * map,void * key,void * next_key) htab_map_get_next_key() argument
887 struct bpf_map *map = &htab->map; htab_put_fd_value() local
947 int off = 0, cpu; pcpu_copy_value() local
966 int cpu; pcpu_init_value() local
1078 htab_map_update_elem(struct bpf_map * map,void * key,void * value,u64 map_flags) htab_map_update_elem() argument
1186 htab_lru_map_update_elem(struct bpf_map * map,void * key,void * value,u64 map_flags) htab_lru_map_update_elem() argument
1253 htab_map_update_elem_in_place(struct bpf_map * map,void * key,void * value,u64 map_flags,bool percpu,bool onallcpus) htab_map_update_elem_in_place() argument
1317 __htab_lru_percpu_map_update_elem(struct bpf_map * map,void * key,void * value,u64 map_flags,bool onallcpus) __htab_lru_percpu_map_update_elem() argument
1387 htab_percpu_map_update_elem(struct bpf_map * map,void * key,void * value,u64 map_flags) htab_percpu_map_update_elem() argument
1393 htab_lru_percpu_map_update_elem(struct bpf_map * map,void * key,void * value,u64 map_flags) htab_lru_percpu_map_update_elem() argument
1401 htab_map_delete_elem(struct bpf_map * map,void * key) htab_map_delete_elem() argument
1437 htab_lru_map_delete_elem(struct bpf_map * map,void * key) htab_lru_map_delete_elem() argument
1517 htab_map_free_timers_and_wq(struct bpf_map * map) htab_map_free_timers_and_wq() argument
1531 htab_map_free(struct bpf_map * map) htab_map_free() argument
1561 htab_map_seq_show_elem(struct bpf_map * map,void * key,struct seq_file * m) htab_map_seq_show_elem() argument
1582 __htab_map_lookup_and_delete_elem(struct bpf_map * map,void * key,void * value,bool is_lru_map,bool is_percpu,u64 flags) __htab_map_lookup_and_delete_elem() argument
1613 int off = 0, cpu; __htab_map_lookup_and_delete_elem() local
1646 htab_map_lookup_and_delete_elem(struct bpf_map * map,void * key,void * value,u64 flags) htab_map_lookup_and_delete_elem() argument
1653 htab_percpu_map_lookup_and_delete_elem(struct bpf_map * map,void * key,void * value,u64 flags) htab_percpu_map_lookup_and_delete_elem() argument
1661 htab_lru_map_lookup_and_delete_elem(struct bpf_map * map,void * key,void * value,u64 flags) htab_lru_map_lookup_and_delete_elem() argument
1668 htab_lru_percpu_map_lookup_and_delete_elem(struct bpf_map * map,void * key,void * value,u64 flags) htab_lru_percpu_map_lookup_and_delete_elem() argument
1677 __htab_map_lookup_and_delete_batch(struct bpf_map * map,const union bpf_attr * attr,union bpf_attr __user * uattr,bool do_delete,bool is_lru_map,bool is_percpu) __htab_map_lookup_and_delete_batch() argument
1805 int off = 0, cpu; __htab_map_lookup_and_delete_batch() local
1908 htab_percpu_map_lookup_batch(struct bpf_map * map,const union bpf_attr * attr,union bpf_attr __user * uattr) htab_percpu_map_lookup_batch() argument
1916 htab_percpu_map_lookup_and_delete_batch(struct bpf_map * map,const union bpf_attr * attr,union bpf_attr __user * uattr) htab_percpu_map_lookup_and_delete_batch() argument
1925 htab_map_lookup_batch(struct bpf_map * map,const union bpf_attr * attr,union bpf_attr __user * uattr) htab_map_lookup_batch() argument
1933 htab_map_lookup_and_delete_batch(struct bpf_map * map,const union bpf_attr * attr,union bpf_attr __user * uattr) htab_map_lookup_and_delete_batch() argument
1942 htab_lru_percpu_map_lookup_batch(struct bpf_map * map,const union bpf_attr * attr,union bpf_attr __user * uattr) htab_lru_percpu_map_lookup_batch() argument
1951 htab_lru_percpu_map_lookup_and_delete_batch(struct bpf_map * map,const union bpf_attr * attr,union bpf_attr __user * uattr) htab_lru_percpu_map_lookup_and_delete_batch() argument
1960 htab_lru_map_lookup_batch(struct bpf_map * map,const union bpf_attr * attr,union bpf_attr __user * uattr) htab_lru_map_lookup_batch() argument
1968 htab_lru_map_lookup_and_delete_batch(struct bpf_map * map,const union bpf_attr * attr,union bpf_attr __user * uattr) htab_lru_map_lookup_and_delete_batch() argument
1977 struct bpf_map *map; global() member
2067 struct bpf_map *map = info->map; __bpf_hash_map_seq_show() local
2069 int ret = 0, off = 0, cpu; __bpf_hash_map_seq_show() local
2118 struct bpf_map *map = aux->map; bpf_iter_init_hash_map() local
2160 bpf_for_each_hash_elem(struct bpf_map * map,bpf_callback_t callback_fn,void * callback_ctx,u64 flags) bpf_for_each_hash_elem() argument
2213 htab_map_mem_usage(const struct bpf_map * map) htab_map_mem_usage() argument
2296 htab_percpu_map_lookup_elem(struct bpf_map * map,void * key) htab_percpu_map_lookup_elem() argument
2307 htab_percpu_map_gen_lookup(struct bpf_map * map,struct bpf_insn * insn_buf) htab_percpu_map_gen_lookup() argument
2326 htab_percpu_map_lookup_percpu_elem(struct bpf_map * map,void * key,u32 cpu) htab_percpu_map_lookup_percpu_elem() argument
2340 htab_lru_percpu_map_lookup_elem(struct bpf_map * map,void * key) htab_lru_percpu_map_lookup_elem() argument
2352 htab_lru_percpu_map_lookup_percpu_elem(struct bpf_map * map,void * key,u32 cpu) htab_lru_percpu_map_lookup_percpu_elem() argument
2368 bpf_percpu_hash_copy(struct bpf_map * map,void * key,void * value) bpf_percpu_hash_copy() argument
2373 int cpu, off = 0; bpf_percpu_hash_copy() local
2400 bpf_percpu_hash_update(struct bpf_map * map,void * key,void * value,u64 map_flags) bpf_percpu_hash_update() argument
2418 htab_percpu_map_seq_show_elem(struct bpf_map * map,void * key,struct seq_file * m) htab_percpu_map_seq_show_elem() argument
2423 int cpu; htab_percpu_map_seq_show_elem() local
2495 fd_htab_map_free(struct bpf_map * map) fd_htab_map_free() argument
2517 bpf_fd_htab_map_lookup_elem(struct bpf_map * map,void * key,u32 * value) bpf_fd_htab_map_lookup_elem() argument
2537 bpf_fd_htab_map_update_elem(struct bpf_map * map,struct file * map_file,void * key,void * value,u64 map_flags) bpf_fd_htab_map_update_elem() argument
2562 struct bpf_map *map, *inner_map_meta; htab_of_map_alloc() local
2579 htab_of_map_lookup_elem(struct bpf_map * map,void * key) htab_of_map_lookup_elem() argument
2589 htab_of_map_gen_lookup(struct bpf_map * map,struct bpf_insn * insn_buf) htab_of_map_gen_lookup() argument
2607 htab_of_map_free(struct bpf_map * map) htab_of_map_free() argument
[all...]
local_storage.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/bpf-cgroup.h>
16 #include "../cgroup/cgroup-internal.h"
22 struct bpf_map map; member
29 static struct bpf_cgroup_storage_map *map_to_storage(struct bpf_map *map) in map_to_storage() argument
31 return container_of(map, struct bpf_cgroup_storage_map, map); in map_to_storage()
34 static bool attach_type_isolated(const struct bpf_map *map) in attach_type_isolated() argument
39 bpf_cgroup_storage_key_cmp(const struct bpf_cgroup_storage_map * map,const void * _key1,const void * _key2) bpf_cgroup_storage_key_cmp() argument
67 cgroup_storage_lookup(struct bpf_cgroup_storage_map * map,void * key,bool locked) cgroup_storage_lookup() argument
102 cgroup_storage_insert(struct bpf_cgroup_storage_map * map,struct bpf_cgroup_storage * storage) cgroup_storage_insert() argument
134 struct bpf_cgroup_storage_map *map = map_to_storage(_map); cgroup_storage_lookup_elem() local
144 cgroup_storage_update_elem(struct bpf_map * map,void * key,void * value,u64 flags) cgroup_storage_update_elem() argument
185 struct bpf_cgroup_storage_map *map = map_to_storage(_map); bpf_percpu_cgroup_storage_copy() local
187 int cpu, off = 0; bpf_percpu_cgroup_storage_copy() local
214 struct bpf_cgroup_storage_map *map = map_to_storage(_map); bpf_percpu_cgroup_storage_update() local
216 int cpu, off = 0; bpf_percpu_cgroup_storage_update() local
248 struct bpf_cgroup_storage_map *map = map_to_storage(_map); cgroup_storage_get_next_key() local
289 struct bpf_cgroup_storage_map *map; cgroup_storage_map_alloc() local
332 struct bpf_cgroup_storage_map *map = map_to_storage(_map); cgroup_storage_map_free() local
351 cgroup_storage_delete_elem(struct bpf_map * map,void * key) cgroup_storage_delete_elem() argument
356 cgroup_storage_check_btf(const struct bpf_map * map,const struct btf * btf,const struct btf_type * key_type,const struct btf_type * value_type) cgroup_storage_check_btf() argument
407 cgroup_storage_seq_show_elem(struct bpf_map * map,void * key,struct seq_file * m) cgroup_storage_seq_show_elem() argument
412 int cpu; cgroup_storage_seq_show_elem() local
442 cgroup_storage_map_usage(const struct bpf_map * map) cgroup_storage_map_usage() argument
475 bpf_cgroup_storage_calculate_size(struct bpf_map * map,u32 * pages) bpf_cgroup_storage_calculate_size() argument
497 struct bpf_map *map; bpf_cgroup_storage_alloc() local
554 struct bpf_map *map; bpf_cgroup_storage_free() local
571 struct bpf_cgroup_storage_map *map; bpf_cgroup_storage_link() local
590 struct bpf_cgroup_storage_map *map; bpf_cgroup_storage_unlink() local
[all...]
arraymap.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
27 for (i = 0; i < array->map.max_entries; i++) { in bpf_array_free_percpu()
28 free_percpu(array->pptrs[i]); in bpf_array_free_percpu()
38 for (i = 0; i < array->map in bpf_array_alloc_percpu()
166 array_map_lookup_elem(struct bpf_map * map,void * key) array_map_lookup_elem() argument
177 array_map_direct_value_addr(const struct bpf_map * map,u64 * imm,u32 off) array_map_direct_value_addr() argument
191 array_map_direct_value_meta(const struct bpf_map * map,u64 imm,u32 * off) array_map_direct_value_meta() argument
208 array_map_gen_lookup(struct bpf_map * map,struct bpf_insn * insn_buf) array_map_gen_lookup() argument
241 percpu_array_map_lookup_elem(struct bpf_map * map,void * key) percpu_array_map_lookup_elem() argument
253 percpu_array_map_gen_lookup(struct bpf_map * map,struct bpf_insn * insn_buf) percpu_array_map_gen_lookup() argument
284 percpu_array_map_lookup_percpu_elem(struct bpf_map * map,void * key,u32 cpu) percpu_array_map_lookup_percpu_elem() argument
298 bpf_percpu_array_copy(struct bpf_map * map,void * key,void * value) bpf_percpu_array_copy() argument
303 int cpu, off = 0; bpf_percpu_array_copy() local
326 array_map_get_next_key(struct bpf_map * map,void * key,void * next_key) array_map_get_next_key() argument
345 array_map_update_elem(struct bpf_map * map,void * key,void * value,u64 map_flags) array_map_update_elem() argument
384 bpf_percpu_array_update(struct bpf_map * map,void * key,void * value,u64 map_flags) bpf_percpu_array_update() argument
390 int cpu, off = 0; bpf_percpu_array_update() local
424 array_map_delete_elem(struct bpf_map * map,void * key) array_map_delete_elem() argument
434 array_map_free_timers_wq(struct bpf_map * map) array_map_free_timers_wq() argument
453 array_map_free(struct bpf_map * map) array_map_free() argument
462 int cpu; array_map_free() local
484 array_map_seq_show_elem(struct bpf_map * map,void * key,struct seq_file * m) array_map_seq_show_elem() argument
505 percpu_array_map_seq_show_elem(struct bpf_map * map,void * key,struct seq_file * m) percpu_array_map_seq_show_elem() argument
511 int cpu; percpu_array_map_seq_show_elem() local
528 array_map_check_btf(const struct bpf_map * map,const struct btf * btf,const struct btf_type * key_type,const struct btf_type * value_type) array_map_check_btf() argument
555 array_map_mmap(struct bpf_map * map,struct vm_area_struct * vma) array_map_mmap() argument
581 struct bpf_map *map; global() member
589 struct bpf_map *map = info->map; bpf_array_map_seq_start() local
608 struct bpf_map *map = info->map; bpf_array_map_seq_next() local
628 struct bpf_map *map = info->map; __bpf_array_map_seq_show() local
632 int off = 0, cpu = 0; __bpf_array_map_seq_show() local
679 struct bpf_map *map = aux->map; bpf_iter_init_array_map() local
724 bpf_for_each_array_elem(struct bpf_map * map,bpf_callback_t callback_fn,void * callback_ctx,u64 flags) bpf_for_each_array_elem() argument
757 array_map_mem_usage(const struct bpf_map * map) array_map_mem_usage() argument
838 fd_array_map_free(struct bpf_map * map) fd_array_map_free() argument
850 fd_array_map_lookup_elem(struct bpf_map * map,void * key) fd_array_map_lookup_elem() argument
856 bpf_fd_array_map_lookup_elem(struct bpf_map * map,void * key,u32 * value) bpf_fd_array_map_lookup_elem() argument
876 bpf_fd_array_map_update_elem(struct bpf_map * map,struct file * map_file,void * key,void * value,u64 map_flags) bpf_fd_array_map_update_elem() argument
908 __fd_array_map_delete_elem(struct bpf_map * map,void * key,bool need_defer) __fd_array_map_delete_elem() argument
934 fd_array_map_delete_elem(struct bpf_map * map,void * key) fd_array_map_delete_elem() argument
939 prog_fd_array_get_ptr(struct bpf_map * map,struct file * map_file,int fd) prog_fd_array_get_ptr() argument
972 prog_fd_array_put_ptr(struct bpf_map * map,void * ptr,bool need_defer) prog_fd_array_put_ptr() argument
989 bpf_fd_array_map_clear(struct bpf_map * map,bool need_defer) bpf_fd_array_map_clear() argument
998 prog_array_map_seq_show_elem(struct bpf_map * map,void * key,struct seq_file * m) prog_array_map_seq_show_elem() argument
1026 prog_array_map_poke_track(struct bpf_map * map,struct bpf_prog_aux * prog_aux) prog_array_map_poke_track() argument
1059 prog_array_map_poke_untrack(struct bpf_map * map,struct bpf_prog_aux * prog_aux) prog_array_map_poke_untrack() argument
1083 prog_array_map_poke_run(struct bpf_map * map,u32 key,struct bpf_prog * old,struct bpf_prog * new) prog_array_map_poke_run() argument
1134 struct bpf_map *map = container_of(work, struct bpf_array_aux, prog_array_map_clear_deferred() local
1140 prog_array_map_clear(struct bpf_map * map) prog_array_map_clear() argument
1151 struct bpf_map *map; prog_array_map_alloc() local
1173 prog_array_map_free(struct bpf_map * map) prog_array_map_free() argument
1240 perf_event_fd_array_get_ptr(struct bpf_map * map,struct file * map_file,int fd) perf_event_fd_array_get_ptr() argument
1266 perf_event_fd_array_put_ptr(struct bpf_map * map,void * ptr,bool need_defer) perf_event_fd_array_put_ptr() argument
1272 perf_event_fd_array_release(struct bpf_map * map,struct file * map_file) perf_event_fd_array_release() argument
1291 perf_event_fd_array_map_free(struct bpf_map * map) perf_event_fd_array_map_free() argument
1315 cgroup_fd_array_get_ptr(struct bpf_map * map,struct file * map_file,int fd) cgroup_fd_array_get_ptr() argument
1322 cgroup_fd_array_put_ptr(struct bpf_map * map,void * ptr,bool need_defer) cgroup_fd_array_put_ptr() argument
1328 cgroup_fd_array_free(struct bpf_map * map) cgroup_fd_array_free() argument
1352 struct bpf_map *map, *inner_map_meta; array_of_map_alloc() local
1369 array_of_map_free(struct bpf_map * map) array_of_map_free() argument
1379 array_of_map_lookup_elem(struct bpf_map * map,void * key) array_of_map_lookup_elem() argument
1389 array_of_map_gen_lookup(struct bpf_map * map,struct bpf_insn * insn_buf) array_of_map_gen_lookup() argument
[all...]
/linux/drivers/clocksource/
ingenic-timer.c
1 // SPDX-License-Identifier: GPL-2.0
14 #include <linux/mfd/ingenic-tcu.h>
23 #include <dt-bindings/clock/ingenic,tcu.h>
32 unsigned int cpu; member
40 struct regmap *map; member
56 regmap_read(tcu->map, TCU_REG_TCNTc(tcu->cs_channel), &count); in ingenic_tcu_timer_read()
69 return container_of(timer, struct ingenic_tcu, timers[timer->cpu]); in to_ingenic_tcu()
83 regmap_write(tcu->map, TCU_REG_TECR, BIT(timer->channel)); in ingenic_tcu_cevt_set_state_shutdown()
95 return -EINVAL; in ingenic_tcu_cevt_set_next()
97 regmap_write(tcu->map, TCU_REG_TDFRc(timer->channel), next); in ingenic_tcu_cevt_set_next()
[all …]
/linux/lib/
cpu_rmap.c
1 // SPDX-License-Identifier: GPL-2.0-only
3 * cpu_rmap.c: CPU affinity reverse-map support
13 * objects with CPU affinities. This can be seen as a reverse-map of
14 * CPU affinity. However, we do not assume that the object affinities
17 * CPU topology.
21 * alloc_cpu_rmap - allocate CPU affinity reverse-map
28 unsigned int cpu; in alloc_cpu_rmap() local
39 rmap = kzalloc(obj_offset + size * sizeof(rmap->obj[0]), flags); in alloc_cpu_rmap()
43 kref_init(&rmap->refcount); in alloc_cpu_rmap()
44 rmap->obj = (void **)((char *)rmap + obj_offset); in alloc_cpu_rmap()
[all …]
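
The header comment explains the reverse-map idea; what follows is a hedged kernel-side sketch of how a driver might use the helpers from <linux/cpu_rmap.h> to build an IRQ affinity reverse map (the function name and the omission of error handling are simplifications, not taken from this file):

    #include <linux/cpu_rmap.h>

    /* Illustrative only: map each RX interrupt into a CPU affinity reverse map. */
    static struct cpu_rmap *setup_rx_rmap(const int *irqs, unsigned int nvec)
    {
        struct cpu_rmap *rmap = alloc_irq_cpu_rmap(nvec);
        unsigned int i;

        if (!rmap)
            return NULL;

        for (i = 0; i < nvec; i++)
            irq_cpu_rmap_add(rmap, irqs[i]);  /* returns <0 on failure */

        return rmap;  /* released later with free_irq_cpu_rmap() */
    }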
/linux/Documentation/bpf/
map_hash.rst
1 .. SPDX-License-Identifier: GPL-2.0-only
3 .. Copyright (C) 2022-2023 Isovalent, Inc.
10 - ``BPF_MAP_TYPE_HASH`` was introduced in kernel version 3.19
11 - ``BPF_MAP_TYPE_PERCPU_HASH`` was introduced in version 4.6
12 - Both ``BPF_MAP_TYPE_LRU_HASH`` and ``BPF_MAP_TYPE_LRU_PERCPU_HASH``
16 purpose hash map storage. Both the key and the value can be structs,
20 to the max_entries limit that you specify. Hash maps use pre-allocation
22 used to disable pre-allocation when it is too memory expensive.
25 CPU. The per-cpu values are stored internally in an array.
32 shared across CPUs but it is possible to request a per CPU LRU list with
[all …]
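
The excerpt describes pre-allocation and the BPF_F_NO_PREALLOC flag. A hedged BPF-side sketch (map name, sizes, and the tracepoint are illustrative assumptions) that declares a non-preallocated hash map and counts execve() calls per PID might look like this:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(max_entries, 1024);
        __uint(map_flags, BPF_F_NO_PREALLOC);  /* opt out of pre-allocation */
        __type(key, __u32);                    /* PID */
        __type(value, __u64);                  /* event count */
    } counts SEC(".maps");

    SEC("tracepoint/syscalls/sys_enter_execve")
    int count_execve(void *ctx)
    {
        __u32 pid = bpf_get_current_pid_tgid() >> 32;
        __u64 one = 1, *val;

        val = bpf_map_lookup_elem(&counts, &pid);
        if (val)
            __sync_fetch_and_add(val, 1);
        else
            bpf_map_update_elem(&counts, &pid, &one, BPF_NOEXIST);
        return 0;
    }

    char _license[] SEC("license") = "GPL";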
map_cpumap.rst
1 .. SPDX-License-Identifier: GPL-2.0-only
9 - ``BPF_MAP_TYPE_CPUMAP`` was introduced in kernel version 4.15
11 .. kernel-doc:: kernel/bpf/cpumap.c
12 :doc: cpu map
14 An example use-case for this map type is software based Receive Side Scaling (RSS).
16 The CPUMAP represents the CPUs in the system indexed as the map-key, and the
17 map-value is the config setting (per CPUMAP entry). Each CPUMAP entry has a dedicated
18 kernel thread bound to the given CPU to represent the remote CPU execution unit.
21 on the remote CPU. This allows an XDP program to split its processing across
22 multiple CPUs. For example, a scenario where the initial CPU (that sees/receives
[all …]
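
On the user-space side, each CPUMAP entry has to be installed before redirects to that CPU take effect; the helper below is a hedged libbpf sketch complementing the XDP fragment shown earlier (the queue size and update flags are arbitrary choices, not values from the documentation):

    #include <linux/bpf.h>
    #include <bpf/bpf.h>

    /* Illustrative: enable redirects to one CPU in an already-loaded CPUMAP. */
    static int enable_cpu(int map_fd, __u32 cpu)
    {
        struct bpf_cpumap_val val = {
            .qsize = 2048,   /* per-CPU queue size, chosen arbitrarily */
        };

        return bpf_map_update_elem(map_fd, &cpu, &val, BPF_ANY);
    }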
map_cgroup_storage.rst
1 .. SPDX-License-Identifier: GPL-2.0-only
8 The ``BPF_MAP_TYPE_CGROUP_STORAGE`` map type represents a local fix-sized
13 The map provide a local storage at the cgroup that the BPF program is attached
19 ``BPF_MAP_TYPE_CGROUP_STORAGE`` map type. Some of its behaviors was changed in
25 The map uses key of type of either ``__u64 cgroup_inode_id`` or
38 map will share the same storage. Otherwise, if the type is
44 void *bpf_get_local_storage(void *map, u64 flags)
75 Userspace accessing map declared above::
80 __u32 map_lookup(struct bpf_map *map, __u64 cgrp, enum bpf_attach_type type)
87 bpf_map_lookup_elem(bpf_map__fd(map), &key, &value);
[all …]
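
Building on the bpf_get_local_storage() signature quoted above, here is a hedged sketch of the BPF-side declaration and use (the section name, value layout, and the byte-counting logic are assumptions for illustration):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
        __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
        __type(key, struct bpf_cgroup_storage_key);
        __type(value, __u64);
    } cg_storage SEC(".maps");

    SEC("cgroup_skb/egress")
    int count_egress(struct __sk_buff *skb)
    {
        __u64 *bytes = bpf_get_local_storage(&cg_storage, 0);

        __sync_fetch_and_add(bytes, skb->len);
        return 1;   /* allow the packet */
    }

    char _license[] SEC("license") = "GPL";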
/linux/samples/bpf/
map_perf_test_user.c
1 // SPDX-License-Identifier: GPL-2.0-only
82 static void test_hash_prealloc(int cpu) in test_hash_prealloc() argument
90 printf("%d:hash_map_perf pre-alloc %lld events per sec\n", in test_hash_prealloc()
91 cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time)); in test_hash_prealloc()
106 * It is fine that the user requests for a map with in pre_test_lru_hash_lookup()
108 * may return not found. For LRU map, we are not interested in pre_test_lru_hash_lookup()
109 * in such small map performance. in pre_test_lru_hash_lookup()
120 static void do_test_lru(enum test_type test, int cpu) in do_test_lru() argument
129 if (test == INNER_LRU_HASH_PREALLOC && cpu) { in do_test_lru()
130 /* If CPU is not 0, create inner_lru hash map and insert the fd in do_test_lru()
[all …]
/linux/drivers/gpu/drm/i915/gem/selftests/
i915_gem_coherency.c
2 * SPDX-License-Identifier: MIT
27 u32 *cpu; in cpu_set() local
30 i915_gem_object_lock(ctx->obj, NULL); in cpu_set()
31 err = i915_gem_object_prepare_write(ctx->obj, &needs_clflush); in cpu_set()
35 page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT); in cpu_set()
36 cpu = kmap_local_page(page) + offset_in_page(offset); in cpu_set()
39 drm_clflush_virt_range(cpu, sizeof(*cpu)); in cpu_set()
41 *cpu = v; in cpu_set()
44 drm_clflush_virt_range(cpu, sizeof(*cpu)); in cpu_set()
46 kunmap_local(cpu); in cpu_set()
[all …]
/linux/arch/sh/kernel/cpu/sh4/
sq.c
1 // SPDX-License-Identifier: GPL-2.0
3 * arch/sh/kernel/cpu/sh4/sq.c
5 * General management API for SH-4 integrated Store Queues
7 * Copyright (C) 2001 - 2006 Paul Mundt
11 #include <linux/cpu.h>
23 #include <cpu/sq.h>
50 * sq_flush_range - Flush (prefetch) a specific SQ range
62 for (len >>= 5; len--; sq += 8) in sq_flush_range()
70 static inline void sq_mapping_list_add(struct sq_mapping *map) in sq_mapping_list_add() argument
78 p = &tmp->next; in sq_mapping_list_add()
[all …]
/linux/arch/x86/kernel/cpu/
topology.c
1 // SPDX-License-Identifier: GPL-2.0-only
3 * CPU/APIC topology
24 #define pr_fmt(fmt) "CPU topo: " fmt
25 #include <linux/cpu.h>
36 #include "cpu.h"
39 * Map cpu index to physical APIC ID
49 /* Used for CPU number allocation and parallel CPU bringup */
50 u32 cpuid_to_apicid[] __ro_after_init = { [0 ... NR_CPUS - 1] = BAD_APICID, };
53 static struct { DECLARE_BITMAP(map, MAX_LOCAL_APIC); } apic_maps[TOPO_MAX_DOMAIN] __ro_after_init;
57 * with 1 as CPU #0 is reserved for the boot CPU.
[all …]
/linux/drivers/dma/
cv1800b-dmamux.c
1 // SPDX-License-Identifier: GPL-2.0
73 unsigned int cpu; member
79 struct cv1800_dmamux_map *map = route_data; in cv1800_dmamux_free() local
81 guard(spinlock_irqsave)(&dmamux->lock); in cv1800_dmamux_free()
83 regmap_update_bits(dmamux->regmap, in cv1800_dmamux_free()
84 DMAMUX_CH_REG(map->channel), in cv1800_dmamux_free()
85 DMAMUX_CH_MASK(map->channel), in cv1800_dmamux_free()
88 regmap_update_bits(dmamux->regmap, REG_DMA_INT_MUX, in cv1800_dmamux_free()
89 DMAMUX_INT_CH_MASK(map->channel, map->cpu), in cv1800_dmamux_free()
90 DMAMUX_INTEN_BIT(map->cpu)); in cv1800_dmamux_free()
[all …]
/linux/arch/arc/plat-axs10x/
axs10x.c
1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 2013-15 Synopsys, Inc. (www.synopsys.com)
11 #include <asm/asm-offsets.h>
30 * Peripherals on CPU Card and Mother Board are wired to cpu intc via in axs10x_enable_gpio_intc_wire()
33 * --------------------- in axs10x_enable_gpio_intc_wire()
34 * | snps,arc700-intc | in axs10x_enable_gpio_intc_wire()
35 * --------------------- in axs10x_enable_gpio_intc_wire()
37 * ------------------- ------------------- in axs10x_enable_gpio_intc_wire()
38 * | snps,dw-apb-gpio | | snps,dw-apb-gpio | in axs10x_enable_gpio_intc_wire()
39 * ------------------- ------------------- in axs10x_enable_gpio_intc_wire()
[all …]
/linux/arch/csky/mm/
asid.c
1 // SPDX-License-Identifier: GPL-2.0
7 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
16 #define reserved_asid(info, cpu) *per_cpu_ptr((info)->reserved, cpu) argument
18 #define ASID_MASK(info) (~GENMASK((info)->bits - 1, 0))
19 #define ASID_FIRST_VERSION(info) (1UL << ((info)->bits))
21 #define asid2idx(info, asid) (((asid) & ~ASID_MASK(info)) >> (info)->ctxt_shift)
22 #define idx2asid(info, idx) (((idx) << (info)->ctxt_shift) & ~ASID_MASK(info))
30 bitmap_zero(info->map, NUM_CTXT_ASIDS(info)); in flush_context()
35 * If this CPU has already been through a in flush_context()
43 __set_bit(asid2idx(info, asid), info->map); in flush_context()
[all …]
/linux/tools/perf/util/bpf_skel/
kwork_trace.bpf.c
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
23 __u32 cpu; member
82 ret = (unsigned char)s1[i] - (unsigned char)s2[i]; in local_strncmp()
95 __u32 cpu = bpf_get_smp_processor_id(); in trace_event_match() local
101 cpu_val = bpf_map_lookup_elem(&perf_kwork_cpu_filter, &cpu); in trace_event_match()
117 do_update_time(void * map,struct work_key * key,__u64 time_start,__u64 time_end) do_update_time() argument
146 do_update_timestart(void * map,struct work_key * key) do_update_timestart() argument
164 do_update_name(void * map,struct work_key * key,char * name) do_update_name() argument
171 update_timestart(void * map,struct work_key * key) update_timestart() argument
[all...]
/linux/drivers/irqchip/
irq-hip04.c
1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 2002-2014 ARM Limited.
6 * Copyright (c) 2013-2014 HiSilicon Ltd.
7 * Copyright (c) 2013-2014 Linaro Ltd.
14 * o There is one CPU Interface per CPU, which sends interrupts sent
16 * associated CPU. The base address of the CPU interface is usually
18 * on the CPU it is accessed from.
20 * Note that IRQs 0-31 are special - they are local to each CPU.
22 * registers are banked per-cpu for these sources.
31 #include <linux/cpu.h>
[all …]
