
Searched full:counters (Results 1 – 25 of 1544) sorted by relevance


/linux/drivers/infiniband/core/
uverbs_std_types_counters.c
42 struct ib_counters *counters = uobject->object; in uverbs_free_counters() local
45 if (atomic_read(&counters->usecnt)) in uverbs_free_counters()
48 ret = counters->device->ops.destroy_counters(counters); in uverbs_free_counters()
51 kfree(counters); in uverbs_free_counters()
61 struct ib_counters *counters; in UVERBS_HANDLER() local
72 counters = rdma_zalloc_drv_obj(ib_dev, ib_counters); in UVERBS_HANDLER()
73 if (!counters) in UVERBS_HANDLER()
76 counters->device = ib_dev; in UVERBS_HANDLER()
77 counters->uobject = uobj; in UVERBS_HANDLER()
78 uobj->object = counters; in UVERBS_HANDLER()
[all …]
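
The teardown shown above follows a common pattern: refuse to free a counters object that is still referenced, let the driver destroy its hardware state, then release the memory. A minimal sketch of that pattern with a hypothetical object type (not the actual ib_counters definitions):

    #include <linux/atomic.h>
    #include <linux/errno.h>
    #include <linux/slab.h>

    /* Hypothetical counters object mirroring the shape used above. */
    struct demo_counters {
            atomic_t usecnt;                        /* outstanding references */
            int (*destroy)(struct demo_counters *); /* driver teardown hook   */
    };

    static int demo_free_counters(struct demo_counters *c)
    {
            int ret;

            /* Still attached to a flow/QP: refuse to free. */
            if (atomic_read(&c->usecnt))
                    return -EBUSY;

            /* Driver tears down its hardware state first. */
            ret = c->destroy(c);
            if (ret)
                    return ret;

            kfree(c);       /* allocated by the create path */
            return 0;
    }
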
/linux/net/netfilter/
xt_connbytes.c
30 const struct nf_conn_counter *counters; in connbytes_mt() local
40 counters = acct->counter; in connbytes_mt()
45 what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].packets); in connbytes_mt()
48 what = atomic64_read(&counters[IP_CT_DIR_REPLY].packets); in connbytes_mt()
51 what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].packets); in connbytes_mt()
52 what += atomic64_read(&counters[IP_CT_DIR_REPLY].packets); in connbytes_mt()
59 what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes); in connbytes_mt()
62 what = atomic64_read(&counters[IP_CT_DIR_REPLY].bytes); in connbytes_mt()
65 what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes); in connbytes_mt()
66 what += atomic64_read(&counters[IP_CT_DIR_REPLY].bytes); in connbytes_mt()
[all …]
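
The match above reads the per-direction conntrack accounting counters. A sketch of the same read pattern, assuming a valid struct nf_conn with accounting enabled (nf_conn_acct_find() returns NULL otherwise):

    #include <linux/atomic.h>
    #include <linux/types.h>
    #include <net/netfilter/nf_conntrack.h>
    #include <net/netfilter/nf_conntrack_acct.h>

    /* Sum the bytes seen in both directions of a tracked connection. */
    static u64 demo_connbytes(const struct nf_conn *ct)
    {
            const struct nf_conn_acct *acct = nf_conn_acct_find(ct);
            const struct nf_conn_counter *counters;

            if (!acct)
                    return 0;       /* accounting disabled for this conntrack */

            counters = acct->counter;
            return atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes) +
                   atomic64_read(&counters[IP_CT_DIR_REPLY].bytes);
    }
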
/linux/lib/
percpu_counter.c
3 * Fast batching percpu counters.
67 s32 *pcount = per_cpu_ptr(fbc->counters, cpu); in percpu_counter_set()
83 * the this_cpu_add(), and the interrupt updates this_cpu(*fbc->counters),
98 count = this_cpu_read(*fbc->counters); in percpu_counter_add_batch()
106 count = __this_cpu_read(*fbc->counters); in percpu_counter_add_batch()
108 __this_cpu_sub(*fbc->counters, count); in percpu_counter_add_batch()
112 } while (!this_cpu_try_cmpxchg(*fbc->counters, &count, count + amount)); in percpu_counter_add_batch()
126 count = __this_cpu_read(*fbc->counters) + amount; in percpu_counter_add_batch()
130 __this_cpu_sub(*fbc->counters, count - amount); in percpu_counter_add_batch()
133 this_cpu_add(*fbc->counters, amount); in percpu_counter_add_batch()
[all …]
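
percpu_counter_add_batch() above accumulates a per-CPU delta and only folds it into the shared 64-bit count once it exceeds the batch size, which is why updates are cheap and reads are approximate. Typical use of the API looks like this (a sketch; error handling trimmed):

    #include <linux/gfp.h>
    #include <linux/percpu_counter.h>

    static struct percpu_counter demo_events;

    static int demo_init(void)
    {
            /* Allocates the per-CPU s32 array; may sleep. */
            return percpu_counter_init(&demo_events, 0, GFP_KERNEL);
    }

    static void demo_hot_path(void)
    {
            /* Cheap: normally touches only this CPU's counter. */
            percpu_counter_add(&demo_events, 1);
    }

    static s64 demo_report(void)
    {
            /* percpu_counter_read() is fast but approximate;
             * percpu_counter_sum() walks every CPU for an exact value.
             */
            return percpu_counter_sum(&demo_events);
    }

    static void demo_exit(void)
    {
            percpu_counter_destroy(&demo_events);
    }
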
/linux/tools/perf/pmu-events/arch/x86/elkhartlake/
memory.json
16 …: "Counts the number of misaligned load uops that are 4K page splits. Available PDIST counters: 0",
26 … "Counts the number of misaligned store uops that are 4K page splits. Available PDIST counters: 0",
37 …ublicDescription": "Counts all code reads that were supplied by DRAM. Available PDIST counters: 0",
48 …tion": "Counts all code reads that were not supplied by the L3 cache. Available PDIST counters: 0",
59 …tion": "Counts all code reads that were not supplied by the L3 cache. Available PDIST counters: 0",
70 …ublicDescription": "Counts all code reads that were supplied by DRAM. Available PDIST counters: 0",
81 …ks from L1 cache and L2 cache that were not supplied by the L3 cache. Available PDIST counters: 0",
92 …ks from L1 cache and L2 cache that were not supplied by the L3 cache. Available PDIST counters: 0",
103 …tches and L1 instruction cache prefetches that were supplied by DRAM. Available PDIST counters: 0",
114 … instruction cache prefetches that were not supplied by the L3 cache. Available PDIST counters: 0",
[all …]
cache.json
164 …scription": "Counts the number of load uops retired that hit in DRAM. Available PDIST counters: 0",
175 …required and modified data was forwarded from another core or module. Available PDIST counters: 0",
186 …Counts the number of load uops retired that hit in the L1 data cache. Available PDIST counters: 0",
197 …ounts the number of load uops retired that miss in the L1 data cache. Available PDIST counters: 0",
208 …n": "Counts the number of load uops retired that hit in the L2 cache. Available PDIST counters: 0",
219 …": "Counts the number of load uops retired that miss in the L2 cache. Available PDIST counters: 0",
230 …n": "Counts the number of load uops retired that hit in the L3 cache. Available PDIST counters: 0",
241 … load AND a store will be counted as 1, not 2 (e.g. ADD [mem], CONST) Available PDIST counters: 0",
252 … "PublicDescription": "Counts the total number of load uops retired. Available PDIST counters: 0",
263 … "PublicDescription": "Counts the total number of store uops retired. Available PDIST counters: 0",
[all …]
/linux/tools/perf/pmu-events/arch/x86/snowridgex/
memory.json
16 …: "Counts the number of misaligned load uops that are 4K page splits. Available PDIST counters: 0",
26 … "Counts the number of misaligned store uops that are 4K page splits. Available PDIST counters: 0",
37 …ublicDescription": "Counts all code reads that were supplied by DRAM. Available PDIST counters: 0",
48 …tion": "Counts all code reads that were not supplied by the L3 cache. Available PDIST counters: 0",
59 …tion": "Counts all code reads that were not supplied by the L3 cache. Available PDIST counters: 0",
70 …ublicDescription": "Counts all code reads that were supplied by DRAM. Available PDIST counters: 0",
81 …ks from L1 cache and L2 cache that were not supplied by the L3 cache. Available PDIST counters: 0",
92 …ks from L1 cache and L2 cache that were not supplied by the L3 cache. Available PDIST counters: 0",
103 …tches and L1 instruction cache prefetches that were supplied by DRAM. Available PDIST counters: 0",
114 … instruction cache prefetches that were not supplied by the L3 cache. Available PDIST counters: 0",
[all …]
cache.json
164 …scription": "Counts the number of load uops retired that hit in DRAM. Available PDIST counters: 0",
175 …required and modified data was forwarded from another core or module. Available PDIST counters: 0",
186 …Counts the number of load uops retired that hit in the L1 data cache. Available PDIST counters: 0",
197 …ounts the number of load uops retired that miss in the L1 data cache. Available PDIST counters: 0",
208 …n": "Counts the number of load uops retired that hit in the L2 cache. Available PDIST counters: 0",
219 …": "Counts the number of load uops retired that miss in the L2 cache. Available PDIST counters: 0",
230 …n": "Counts the number of load uops retired that hit in the L3 cache. Available PDIST counters: 0",
241 … load AND a store will be counted as 1, not 2 (e.g. ADD [mem], CONST) Available PDIST counters: 0",
252 … "PublicDescription": "Counts the total number of load uops retired. Available PDIST counters: 0",
263 … "PublicDescription": "Counts the total number of store uops retired. Available PDIST counters: 0",
[all …]
/linux/tools/testing/selftests/net/tcp_ao/lib/
proc.c
18 struct netstat_counter *counters; member
52 ret->counters = NULL; in lookup_get()
81 type->counters = reallocarray(type->counters, in netstat_read_type()
84 if (!type->counters) in netstat_read_type()
95 struct netstat_counter *nc = &type->counters[i]; in netstat_read_type()
133 type->counters = reallocarray(type->counters, i + 1, in snmp6_read()
135 if (!type->counters) in snmp6_read()
137 nc = &type->counters[i]; in snmp6_read()
196 free(ns->counters[i].name); in netstat_free()
197 free(ns->counters); in netstat_free()
[all …]
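
netstat_read_type() above grows its counter array one entry at a time with reallocarray(), which, unlike a bare realloc(count * size), fails cleanly on multiplication overflow. A userspace sketch of that growth pattern (hypothetical struct; glibc needs _GNU_SOURCE for reallocarray()):

    #define _GNU_SOURCE
    #include <stdlib.h>
    #include <string.h>

    struct counter {                /* hypothetical, mirrors netstat_counter */
            char *name;
            unsigned long long value;
    };

    /* Append one counter, growing the array; returns 0 on success. */
    static int counter_append(struct counter **arr, size_t *n,
                              const char *name, unsigned long long value)
    {
            struct counter *tmp = reallocarray(*arr, *n + 1, sizeof(**arr));

            if (!tmp)
                    return -1;      /* old array is still valid */

            *arr = tmp;
            (*arr)[*n].name = strdup(name);
            if (!(*arr)[*n].name)
                    return -1;
            (*arr)[*n].value = value;
            (*n)++;
            return 0;
    }
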
/linux/tools/perf/
design.txt
2 Performance Counters for Linux
5 Performance counters are special hardware registers available on most modern
13 hardware capabilities. It provides per task and per CPU counters, counter
15 provides "virtual" 64-bit counters, regardless of the width of the
16 underlying hardware counters.
18 Performance counters are accessed via special file descriptors.
32 Multiple counters can be kept open at a time, and the counters
115 on all CPUs that implement Performance Counters support under Linux,
130 * Special "software" counters provided by the kernel, even if the hardware
131 * does not support performance counters. These counters measure various
[all …]
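
design.txt describes counters as per-task or per-CPU objects accessed through file descriptors, virtualized to 64 bits. A minimal userspace sketch that opens one hardware counter for the current task and reads it, based on the perf_event_open(2) interface (error handling trimmed):

    #include <linux/perf_event.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                int cpu, int group_fd, unsigned long flags)
    {
            return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
            struct perf_event_attr attr;
            uint64_t count;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_HARDWARE;
            attr.config = PERF_COUNT_HW_INSTRUCTIONS;
            attr.disabled = 1;
            attr.exclude_kernel = 1;

            /* pid = 0, cpu = -1: this task, any CPU. */
            fd = perf_event_open(&attr, 0, -1, -1, 0);
            if (fd < 0)
                    return 1;

            ioctl(fd, PERF_EVENT_IOC_RESET, 0);
            ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
            /* ... run the code being measured ... */
            ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

            if (read(fd, &count, sizeof(count)) == sizeof(count))
                    printf("instructions: %llu\n", (unsigned long long)count);
            close(fd);
            return 0;
    }
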
/linux/Documentation/core-api/
local_ops.rst
30 counters. They minimize the performance cost of standard atomic operations by
34 Having fast per CPU atomic counters is interesting in many cases: it does not
36 coherent counters in NMI handlers. It is especially useful for tracing purposes
37 and for various performance monitoring counters.
95 static DEFINE_PER_CPU(local_t, counters) = LOCAL_INIT(0);
107 local_inc(&get_cpu_var(counters));
108 put_cpu_var(counters);
113 local_inc(this_cpu_ptr(&counters));
117 Reading the counters
120 Those local counters can be read from foreign CPUs to sum the count. Note that
[all …]
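
The document's example reduces to: each CPU increments only its own local_t, and any CPU may later sum all of them. A condensed sketch of that split:

    #include <linux/cpumask.h>
    #include <linux/percpu.h>
    #include <asm/local.h>

    static DEFINE_PER_CPU(local_t, hits) = LOCAL_INIT(0);

    /* Fast path: only the owning CPU modifies its counter. */
    static void count_hit(void)
    {
            local_inc(this_cpu_ptr(&hits));
    }

    /* Slow path: any CPU may read and sum the per-CPU counters. */
    static long sum_hits(void)
    {
            long sum = 0;
            int cpu;

            for_each_online_cpu(cpu)
                    sum += local_read(&per_cpu(hits, cpu));
            return sum;
    }
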
/linux/drivers/net/ethernet/aquantia/atlantic/macsec/
macsec_api.h
265 /*! Read the counters for the specified SC, and unpack them into the
266 * fields of counters.
267 * counters - [OUT] The raw table row data will be unpacked here.
271 struct aq_mss_egress_sc_counters *counters,
274 /*! Read the counters for the specified SA, and unpack them into the
275 * fields of counters.
276 * counters - [OUT] The raw table row data will be unpacked here.
280 struct aq_mss_egress_sa_counters *counters,
283 /*! Read the counters for the common egress counters, and unpack them
284 * into the fields of counters.
[all …]
macsec_api.c
1825 struct aq_mss_egress_sc_counters *counters, in get_egress_sc_counters() argument
1837 counters->sc_protected_pkts[0] = in get_egress_sc_counters()
1839 counters->sc_protected_pkts[1] = in get_egress_sc_counters()
1845 counters->sc_encrypted_pkts[0] = in get_egress_sc_counters()
1847 counters->sc_encrypted_pkts[1] = in get_egress_sc_counters()
1853 counters->sc_protected_octets[0] = in get_egress_sc_counters()
1855 counters->sc_protected_octets[1] = in get_egress_sc_counters()
1861 counters->sc_encrypted_octets[0] = in get_egress_sc_counters()
1863 counters->sc_encrypted_octets[1] = in get_egress_sc_counters()
1870 struct aq_mss_egress_sc_counters *counters, in aq_mss_get_egress_sc_counters() argument
[all …]
/linux/Documentation/translations/zh_CN/core-api/
local_ops.rst
93 static DEFINE_PER_CPU(local_t, counters) = LOCAL_INIT(0);
105 local_inc(&get_cpu_var(counters));
106 put_cpu_var(counters);
110 local_inc(this_cpu_ptr(&counters));
123 sum += local_read(&per_cpu(counters, cpu));
143 static DEFINE_PER_CPU(local_t, counters) = LOCAL_INIT(0);
152 local_inc(this_cpu_ptr(&counters));
157 * local_inc(&get_cpu_var(counters));
158 * put_cpu_var(counters);
166 /* Increment the counters */
[all …]
/linux/samples/cgroup/
memcg_event_listener.c
43 struct memcg_counters counters; member
49 static void print_memcg_counters(const struct memcg_counters *counters) in print_memcg_counters() argument
52 printf("\tlow: %ld\n", counters->low); in print_memcg_counters()
53 printf("\thigh: %ld\n", counters->high); in print_memcg_counters()
54 printf("\tmax: %ld\n", counters->max); in print_memcg_counters()
55 printf("\toom: %ld\n", counters->oom); in print_memcg_counters()
56 printf("\toom_kill: %ld\n", counters->oom_kill); in print_memcg_counters()
57 printf("\toom_group_kill: %ld\n", counters->oom_group_kill); in print_memcg_counters()
107 struct memcg_counters *counters = &events->counters; in read_memcg_events() local
116 .old = &counters->low, in read_memcg_events()
[all …]
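
The listener above tracks the counters exposed in a cgroup's memory.events file (low, high, max, oom, oom_kill, oom_group_kill). A sketch of reading one of those counters directly; the cgroup path is only an example:

    #include <stdio.h>
    #include <string.h>

    /* Return the value of one key from a memory.events file, or -1. */
    static long read_memcg_event(const char *path, const char *key)
    {
            char name[64];
            long value = -1, v;
            FILE *f = fopen(path, "r");

            if (!f)
                    return -1;
            while (fscanf(f, "%63s %ld", name, &v) == 2) {
                    if (!strcmp(name, key)) {
                            value = v;
                            break;
                    }
            }
            fclose(f);
            return value;
    }

    /* e.g. read_memcg_event("/sys/fs/cgroup/test/memory.events", "oom_kill") */
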
/linux/fs/xfs/scrub/
fscounters.c
29 * FS Summary Counters
35 * Then we compare what we computed against the in-core counters.
37 * However, the reality is that summary counters are a tricky beast to check.
48 * structures as quickly as it can. We snapshot the percpu counters before and
55 * values, the percpu counters should be fairly close to each other. However,
62 * contents and trust that the incore counters match the ondisk counters. (The
64 * summary counters after checking all AG headers). Do this from the setup
145 * that comprise the summary counters and compare them to the percpu counters.
219 /* We must get the incore counters set up before we can proceed. */ in xchk_setup_fscounters()
226 * reduce the likelihood of background perturbations to the counters in xchk_setup_fscounters()
[all …]
/linux/include/linux/
nfs_iostat.h
13 * These counters are not meant to be human-readable, but are meant
15 * "iostat". As such, the counters are sampled by the tools over
28 * NFS byte counters
40 * These counters give a view of the data throughput into and out
46 * These counters can also help characterize which access methods
53 * NFS page counters
58 * NB: When adding new byte counters, please include the measured
75 * NFS event counters
77 * These counters provide a low-overhead way of monitoring client
78 * activity without enabling NFS trace debugging. The counters
/linux/tools/perf/pmu-events/arch/x86/clearwaterforest/
cache.json
25 "PublicDescription": "Counts the number of load ops retired. Available PDIST counters: 0,1",
34 … "PublicDescription": "Counts the number of store ops retired. Available PDIST counters: 0,1",
45 …ed in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled. Available PDIST counters: 0,1",
56 …ed in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled. Available PDIST counters: 0,1",
67 …ed in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled. Available PDIST counters: 0,1",
78 …ed in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled. Available PDIST counters: 0,1",
89 …ed in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled. Available PDIST counters: 0,1",
100 …ed in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled. Available PDIST counters: 0,1",
111 …ed in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled. Available PDIST counters: 0,1",
122 …ed in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled. Available PDIST counters: 0,1",
[all …]
/linux/net/ipv4/netfilter/
arp_tables.c
230 counter = xt_get_this_cpu_counter(&e->counters); in arpt_do_table()
319 e->counters.pcnt = pos; in mark_source_chains()
345 pos = e->counters.pcnt; in mark_source_chains()
346 e->counters.pcnt = 0; in mark_source_chains()
360 e->counters.pcnt = pos; in mark_source_chains()
379 e->counters.pcnt = pos; in mark_source_chains()
413 if (!xt_percpu_counter_alloc(alloc_state, &e->counters)) in find_check_entry()
432 xt_percpu_counter_free(&e->counters); in find_check_entry()
494 /* Clear counters and comefrom */ in check_entry_size_and_hooks()
495 e->counters = ((struct xt_counters) { 0, 0 }); in check_entry_size_and_hooks()
[all …]
ip_tables.c
297 counter = xt_get_this_cpu_counter(&e->counters); in ipt_do_table()
383 e->counters.pcnt = pos; in mark_source_chains()
407 pos = e->counters.pcnt; in mark_source_chains()
408 e->counters.pcnt = 0; in mark_source_chains()
422 e->counters.pcnt = pos; in mark_source_chains()
441 e->counters.pcnt = pos; in mark_source_chains()
526 if (!xt_percpu_counter_alloc(alloc_state, &e->counters)) in find_check_entry()
566 xt_percpu_counter_free(&e->counters); in find_check_entry()
629 /* Clear counters and comefrom */ in check_entry_size_and_hooks()
630 e->counters = ((struct xt_counters) { 0, 0 }); in check_entry_size_and_hooks()
740 get_counters(const struct xt_table_info *t, struct xt_counters counters[]) get_counters() argument
770 get_old_counters(const struct xt_table_info *t, struct xt_counters counters[]) get_old_counters() argument
792 struct xt_counters *counters; in alloc_counters() local
816 struct xt_counters *counters; in copy_entries_to_user() local
1043 struct xt_counters *counters; in __do_replace() local
1213 compat_uptr_t counters; /* struct xt_counters * */ member
1219 compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr, unsigned int *size, struct xt_counters *counters, unsigned int i) compat_copy_entry_to_user() argument
1553 struct xt_counters *counters; in compat_copy_entries_to_user() local
[all …]
/linux/tools/perf/pmu-events/arch/x86/graniterapids/
cache.json
323 … prefetch instructions of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW. Available PDIST counters: 0",
333 "PublicDescription": "Counts all retired store instructions. Available PDIST counters: 0",
343 …ription": "Counts all retired memory instructions - loads and stores. Available PDIST counters: 0",
353 …icDescription": "Counts retired load instructions with locked access. Available PDIST counters: 0",
366 …nts retired load instructions that split across a cacheline boundary. Available PDIST counters: 0",
379 …ts retired store instructions that split across a cacheline boundary. Available PDIST counters: 0",
392 …tired load instructions with a clean hit in the 2nd-level TLB (STLB). Available PDIST counters: 0",
405 …r of retired store instructions that hit in the 2nd-level TLB (STLB). Available PDIST counters: 0",
418 …ed load instructions that (start a) miss in the 2nd-level TLB (STLB). Available PDIST counters: 0",
428 …d store instructions that (start a) miss in the 2nd-level TLB (STLB). Available PDIST counters: 0",
[all …]
/linux/tools/perf/pmu-events/arch/x86/pantherlake/
cache.json
392 …nstructions with at least one architecturally visible load retired. Available PDIST counters: 0,1",
403 "PublicDescription": "Counts all retired store instructions. Available PDIST counters: 0,1",
413 …icDescription": "Counts all retired software prefetch instructions. Available PDIST counters: 0,1",
424 …ption": "Counts all retired memory instructions - loads and stores. Available PDIST counters: 0,1",
435 …Description": "Counts retired load instructions with locked access. Available PDIST counters: 0,1",
446 …s retired load instructions that split across a cacheline boundary. Available PDIST counters: 0,1",
457 … retired store instructions that split across a cacheline boundary. Available PDIST counters: 0,1",
468 … retired instructions with a clean hit in the 2nd-level TLB (STLB). Available PDIST counters: 0,1",
479 …red load instructions with a clean hit in the 2nd-level TLB (STLB). Available PDIST counters: 0,1",
490 …of retired store instructions that hit in the 2nd-level TLB (STLB). Available PDIST counters: 0,1",
[all …]
memory.json
38 …cles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0,1",
51 …cles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0,1",
64 …cles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0,1",
77 …cles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0,1",
90 …cles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0,1",
103 …cles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0,1",
116 …cles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0,1",
129 …cles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0,1",
142 …cles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0,1",
155 …cles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0,1",
[all …]
/linux/tools/perf/tests/shell/
stat_bpf_counters.sh
2 # perf stat --bpf-counters test (exclusive)
36 echo "Failed: instructions not counted with --bpf-counters"
43 printf "Testing --bpf-counters "
45 …bpf_instructions=$(perf stat --no-big-num --bpf-counters -e instructions -- $workload 2>&1 | awk …
62 # skip if --bpf-counters is not supported
63 if ! perf stat -e instructions --bpf-counters true > /dev/null 2>&1; then
65 echo "Skipping: --bpf-counters not supported"
66 perf --no-pager stat -e instructions --bpf-counters true || true
/linux/tools/perf/pmu-events/arch/x86/sapphirerapids/
cache.json
377 … prefetch instructions of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW. Available PDIST counters: 0",
387 "PublicDescription": "Counts all retired store instructions. Available PDIST counters: 0",
397 …ription": "Counts all retired memory instructions - loads and stores. Available PDIST counters: 0",
407 …icDescription": "Counts retired load instructions with locked access. Available PDIST counters: 0",
417 …nts retired load instructions that split across a cacheline boundary. Available PDIST counters: 0",
427 …ts retired store instructions that split across a cacheline boundary. Available PDIST counters: 0",
437 …ed load instructions that (start a) miss in the 2nd-level TLB (STLB). Available PDIST counters: 0",
447 …d store instructions that (start a) miss in the 2nd-level TLB (STLB). Available PDIST counters: 0",
466 …d instructions whose data sources were HitM responses from shared L3. Available PDIST counters: 0",
476 …sources were L3 hit and cross-core snoop missed in on-pkg core cache. Available PDIST counters: 0",
[all …]
/linux/net/ipv6/netfilter/
ip6_tables.c
320 counter = xt_get_this_cpu_counter(&e->counters); in ip6t_do_table()
401 e->counters.pcnt = pos; in mark_source_chains()
425 pos = e->counters.pcnt; in mark_source_chains()
426 e->counters.pcnt = 0; in mark_source_chains()
440 e->counters.pcnt = pos; in mark_source_chains()
459 e->counters.pcnt = pos; in mark_source_chains()
545 if (!xt_percpu_counter_alloc(alloc_state, &e->counters)) in find_check_entry()
584 xt_percpu_counter_free(&e->counters); in find_check_entry()
647 /* Clear counters and comefrom */ in check_entry_size_and_hooks()
648 e->counters = ((struct xt_counters) { 0, 0 }); in check_entry_size_and_hooks()
757 get_counters(const struct xt_table_info *t, struct xt_counters counters[]) get_counters() argument
787 get_old_counters(const struct xt_table_info *t, struct xt_counters counters[]) get_old_counters() argument
808 struct xt_counters *counters; in alloc_counters() local
832 struct xt_counters *counters; in copy_entries_to_user() local
1060 struct xt_counters *counters; in __do_replace() local
1229 compat_uptr_t counters; /* struct xt_counters * */ member
1235 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr, unsigned int *size, struct xt_counters *counters, unsigned int i) compat_copy_entry_to_user() argument
1562 struct xt_counters *counters; in compat_copy_entries_to_user() local
[all …]
