/linux/drivers/infiniband/core/
  uverbs_std_types_counters.c
    42  struct ib_counters *counters = uobject->object;  in uverbs_free_counters() (local)
    45  if (atomic_read(&counters->usecnt))  in uverbs_free_counters()
    48  ret = counters->device->ops.destroy_counters(counters);  in uverbs_free_counters()
    51  kfree(counters);  in uverbs_free_counters()
    61  struct ib_counters *counters;  in UVERBS_HANDLER() (local)
    72  counters = rdma_zalloc_drv_obj(ib_dev, ib_counters);  in UVERBS_HANDLER()
    73  if (!counters)  in UVERBS_HANDLER()
    76  counters->device = ib_dev;  in UVERBS_HANDLER()
    77  counters->uobject = uobj;  in UVERBS_HANDLER()
    78  uobj->object = counters;  in UVERBS_HANDLER()
    [all …]
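The free path above is a common kernel ownership pattern: refuse to tear down an object that still has users (usecnt non-zero), call the driver's destroy op, and free the memory only on success. A minimal user-space model of that ordering; struct obj and obj_destroy are hypothetical names, not the uverbs API:

    #include <errno.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
            atomic_int usecnt; /* how many flows still reference us */
    };

    static int obj_destroy(struct obj *o)
    {
            /* mirror uverbs_free_counters(): refuse while referenced ... */
            if (atomic_load(&o->usecnt))
                    return -EBUSY;
            /* ... then release driver state (omitted) and free on success */
            free(o);
            return 0;
    }

    int main(void)
    {
            struct obj *o = calloc(1, sizeof(*o));

            atomic_fetch_add(&o->usecnt, 1);
            printf("destroy while busy: %d\n", obj_destroy(o)); /* -EBUSY */
            atomic_fetch_sub(&o->usecnt, 1);
            printf("destroy when idle: %d\n", obj_destroy(o)); /* 0 */
            return 0;
    }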
/linux/lib/
  percpu_counter.c
    67  s32 *pcount = per_cpu_ptr(fbc->counters, cpu);  in percpu_counter_set()
    98  count = this_cpu_read(*fbc->counters);  in percpu_counter_add_batch()
    106  count = __this_cpu_read(*fbc->counters);  in percpu_counter_add_batch()
    108  __this_cpu_sub(*fbc->counters, count);  in percpu_counter_add_batch()
    112  } while (!this_cpu_try_cmpxchg(*fbc->counters, &count, count + amount));  in percpu_counter_add_batch()
    126  count = __this_cpu_read(*fbc->counters) + amount;  in percpu_counter_add_batch()
    130  __this_cpu_sub(*fbc->counters, count - amount);  in percpu_counter_add_batch()
    133  this_cpu_add(*fbc->counters, amount);  in percpu_counter_add_batch()
    152  count = __this_cpu_read(*fbc->counters);  in percpu_counter_sync()
    154  __this_cpu_sub(*fbc->counters, count);  in percpu_counter_sync()
    [all …]
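These hits are the generic per-CPU counter library: each CPU accumulates a small signed delta in fbc->counters and folds it into the shared 64-bit total only when the delta crosses a batch threshold, so the hot path rarely touches shared state. A simplified, single-threaded C model of that fold logic (the real code uses per-CPU operations and takes fbc->lock for the fold; all names here are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define BATCH 32 /* fold into the global total once a delta reaches this */
    #define NCPUS 4

    struct model_counter {
            int64_t count;        /* shared, approximate-until-summed total */
            int32_t delta[NCPUS]; /* one small delta per simulated CPU */
    };

    static void model_add(struct model_counter *c, int cpu, int64_t amount)
    {
            int64_t d = c->delta[cpu] + amount;

            if (d >= BATCH || d <= -BATCH) {
                    /* the kernel takes fbc->lock here before touching ->count */
                    c->count += d;
                    c->delta[cpu] = 0;
            } else {
                    c->delta[cpu] = d;
            }
    }

    static int64_t model_sum(const struct model_counter *c)
    {
            int64_t sum = c->count;

            /* percpu_counter_sum() likewise walks every CPU's delta */
            for (int cpu = 0; cpu < NCPUS; cpu++)
                    sum += c->delta[cpu];
            return sum;
    }

    int main(void)
    {
            struct model_counter c = { 0 };

            for (int i = 0; i < 100; i++)
                    model_add(&c, i % NCPUS, 1);
            printf("approximate: %lld exact: %lld\n",
                   (long long)c.count, (long long)model_sum(&c));
            return 0;
    }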
  stackdepot.c
    87  static long counters[DEPOT_COUNTER_COUNT];  (variable)
    413  counters[DEPOT_COUNTER_FREELIST_SIZE]--;  in depot_pop_free()
    470  counters[DEPOT_COUNTER_REFD_ALLOCS]++;  in depot_alloc_stack()
    471  counters[DEPOT_COUNTER_REFD_INUSE]++;  in depot_alloc_stack()
    475  counters[DEPOT_COUNTER_PERSIST_COUNT]++;  in depot_alloc_stack()
    476  counters[DEPOT_COUNTER_PERSIST_BYTES] += record_size;  in depot_alloc_stack()
    552  counters[DEPOT_COUNTER_FREELIST_SIZE]++;  in depot_free_stack()
    553  counters[DEPOT_COUNTER_REFD_FREES]++;  in depot_free_stack()
    554  counters[DEPOT_COUNTER_REFD_INUSE]--;  in depot_free_stack()
    853  seq_printf(seq, "%s: %ld\n", counter_names[i], data_race(counters[i]));  in stats_show()
/linux/net/netfilter/
  xt_connbytes.c
    30  const struct nf_conn_counter *counters;  in connbytes_mt() (local)
    40  counters = acct->counter;  in connbytes_mt()
    45  what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].packets);  in connbytes_mt()
    48  what = atomic64_read(&counters[IP_CT_DIR_REPLY].packets);  in connbytes_mt()
    51  what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].packets);  in connbytes_mt()
    52  what += atomic64_read(&counters[IP_CT_DIR_REPLY].packets);  in connbytes_mt()
    59  what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes);  in connbytes_mt()
    62  what = atomic64_read(&counters[IP_CT_DIR_REPLY].bytes);  in connbytes_mt()
    65  what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes);  in connbytes_mt()
    66  what += atomic64_read(&counters[IP_CT_DIR_REPLY].bytes);  in connbytes_mt()
    [all …]
/linux/tools/testing/selftests/net/tcp_ao/lib/
  proc.c
    18  struct netstat_counter *counters;  (member)
    52  ret->counters = NULL;  in lookup_get()
    81  type->counters = reallocarray(type->counters,  in netstat_read_type()
    84  if (!type->counters)  in netstat_read_type()
    95  struct netstat_counter *nc = &type->counters[i];  in netstat_read_type()
    133  type->counters = reallocarray(type->counters, i + 1,  in snmp6_read()
    135  if (!type->counters)  in snmp6_read()
    137  nc = &type->counters[i];  in snmp6_read()
    196  free(ns->counters[i].name);  in netstat_free()
    197  free(ns->counters);  in netstat_free()
    [all …]
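The selftest grows its counter array one element at a time with reallocarray(), which, unlike a bare realloc(nmemb * size), fails cleanly when the multiplication would overflow. A self-contained sketch of that growth pattern; struct counter and counter_append are illustrative names, not the selftest's API:

    #define _GNU_SOURCE /* reallocarray() is exposed by glibc >= 2.26 */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct counter {
            char *name;
            unsigned long val;
    };

    /* Append one entry, growing the array by exactly one slot as the
     * selftest does; reallocarray() checks n * size for overflow. */
    static int counter_append(struct counter **arr, size_t *n,
                              const char *name, unsigned long val)
    {
            struct counter *tmp = reallocarray(*arr, *n + 1, sizeof(**arr));

            if (!tmp)
                    return -1; /* *arr stays valid; caller still frees it */
            *arr = tmp;
            tmp[*n].name = strdup(name);
            if (!tmp[*n].name)
                    return -1;
            tmp[*n].val = val;
            (*n)++;
            return 0;
    }

    int main(void)
    {
            struct counter *arr = NULL;
            size_t n = 0;

            counter_append(&arr, &n, "TcpActiveOpens", 3);
            counter_append(&arr, &n, "TcpRetransSegs", 1);
            for (size_t i = 0; i < n; i++)
                    printf("%s: %lu\n", arr[i].name, arr[i].val);
            for (size_t i = 0; i < n; i++)
                    free(arr[i].name); /* mirrors netstat_free() */
            free(arr);
            return 0;
    }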
/linux/drivers/net/ethernet/aquantia/atlantic/macsec/
  macsec_api.c
    1825  struct aq_mss_egress_sc_counters *counters,  in get_egress_sc_counters() (argument)
    1837  counters->sc_protected_pkts[0] =  in get_egress_sc_counters()
    1839  counters->sc_protected_pkts[1] =  in get_egress_sc_counters()
    1845  counters->sc_encrypted_pkts[0] =  in get_egress_sc_counters()
    1847  counters->sc_encrypted_pkts[1] =  in get_egress_sc_counters()
    1853  counters->sc_protected_octets[0] =  in get_egress_sc_counters()
    1855  counters->sc_protected_octets[1] =  in get_egress_sc_counters()
    1861  counters->sc_encrypted_octets[0] =  in get_egress_sc_counters()
    1863  counters->sc_encrypted_octets[1] =  in get_egress_sc_counters()
    1870  struct aq_mss_egress_sc_counters *counters,  in aq_mss_get_egress_sc_counters() (argument)
    [all …]
  macsec_api.h
    271  struct aq_mss_egress_sc_counters *counters,
    280  struct aq_mss_egress_sa_counters *counters,
    288  struct aq_mss_egress_common_counters *counters);
    299  struct aq_mss_ingress_sa_counters *counters,
    307  struct aq_mss_ingress_common_counters *counters);
/linux/samples/cgroup/
  memcg_event_listener.c
    43  struct memcg_counters counters;  (member)
    49  static void print_memcg_counters(const struct memcg_counters *counters)  in print_memcg_counters() (argument)
    52  printf("\tlow: %ld\n", counters->low);  in print_memcg_counters()
    53  printf("\thigh: %ld\n", counters->high);  in print_memcg_counters()
    54  printf("\tmax: %ld\n", counters->max);  in print_memcg_counters()
    55  printf("\toom: %ld\n", counters->oom);  in print_memcg_counters()
    56  printf("\toom_kill: %ld\n", counters->oom_kill);  in print_memcg_counters()
    57  printf("\toom_group_kill: %ld\n", counters->oom_group_kill);  in print_memcg_counters()
    107  struct memcg_counters *counters = &events->counters;  in read_memcg_events() (local)
    116  .old = &counters->low,  in read_memcg_events()
    [all …]
/linux/Documentation/translations/zh_CN/core-api/
  local_ops.rst
    93  static DEFINE_PER_CPU(local_t, counters) = LOCAL_INIT(0);
    105  local_inc(&get_cpu_var(counters));
    106  put_cpu_var(counters);
    110  local_inc(this_cpu_ptr(&counters));
    123  sum += local_read(&per_cpu(counters, cpu));
    143  static DEFINE_PER_CPU(local_t, counters) = LOCAL_INIT(0);
    152  local_inc(this_cpu_ptr(&counters));
    157  * local_inc(&get_cpu_var(counters));
    158  * put_cpu_var(counters);
    166  /* Increment the counters */
    [all …]
/linux/kernel/gcov/
  gcc_base.c
    46  void __gcov_merge_add(gcov_type *counters, unsigned int n_counters)  in __gcov_merge_add() (argument)
    52  void __gcov_merge_single(gcov_type *counters, unsigned int n_counters)  in __gcov_merge_single() (argument)
    58  void __gcov_merge_delta(gcov_type *counters, unsigned int n_counters)  in __gcov_merge_delta() (argument)
    64  void __gcov_merge_ior(gcov_type *counters, unsigned int n_counters)  in __gcov_merge_ior() (argument)
    70  void __gcov_merge_time_profile(gcov_type *counters, unsigned int n_counters)  in __gcov_merge_time_profile() (argument)
    76  void __gcov_merge_icall_topn(gcov_type *counters, unsigned int n_counters)  in __gcov_merge_icall_topn() (argument)
/linux/net/ipv4/netfilter/
  arp_tables.c
    230  counter = xt_get_this_cpu_counter(&e->counters);  in arpt_do_table()
    319  e->counters.pcnt = pos;  in mark_source_chains()
    345  pos = e->counters.pcnt;  in mark_source_chains()
    346  e->counters.pcnt = 0;  in mark_source_chains()
    360  e->counters.pcnt = pos;  in mark_source_chains()
    379  e->counters.pcnt = pos;  in mark_source_chains()
    413  if (!xt_percpu_counter_alloc(alloc_state, &e->counters))  in find_check_entry()
    432  xt_percpu_counter_free(&e->counters);  in find_check_entry()
    495  e->counters = ((struct xt_counters) { 0, 0 });  in check_entry_size_and_hooks()
    513  xt_percpu_counter_free(&e->counters);  in cleanup_entry()
    [all …]
  ip_tables.c
    297  counter = xt_get_this_cpu_counter(&e->counters);  in ipt_do_table()
    383  e->counters.pcnt = pos;  in mark_source_chains()
    407  pos = e->counters.pcnt;  in mark_source_chains()
    408  e->counters.pcnt = 0;  in mark_source_chains()
    422  e->counters.pcnt = pos;  in mark_source_chains()
    441  e->counters.pcnt = pos;  in mark_source_chains()
    526  if (!xt_percpu_counter_alloc(alloc_state, &e->counters))  in find_check_entry()
    566  xt_percpu_counter_free(&e->counters);  in find_check_entry()
    629  /* Clear counters and comefrom */  in check_entry_size_and_hooks()
    630  e->counters …  in check_entry_size_and_hooks()
    740  get_counters(const struct xt_table_info *t, struct xt_counters counters[])  (argument)
    770  get_old_counters(const struct xt_table_info *t, struct xt_counters counters[])  (argument)
    792  struct xt_counters *counters;  in alloc_counters() (local)
    816  struct xt_counters *counters;  in copy_entries_to_user() (local)
    1043  struct xt_counters *counters;  in __do_replace() (local)
    1213  compat_uptr_t counters; /* struct xt_counters * */  (member)
    1219  compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr, unsigned int *size, struct xt_counters *counters, unsigned int i)  (argument)
    1553  struct xt_counters *counters;  in compat_copy_entries_to_user() (local)
    [all …]
/linux/net/ipv6/netfilter/
  ip6_tables.c
    320  counter = xt_get_this_cpu_counter(&e->counters);  in ip6t_do_table()
    401  e->counters.pcnt = pos;  in mark_source_chains()
    425  pos = e->counters.pcnt;  in mark_source_chains()
    426  e->counters.pcnt = 0;  in mark_source_chains()
    440  e->counters.pcnt = pos;  in mark_source_chains()
    459  e->counters.pcnt = pos;  in mark_source_chains()
    545  if (!xt_percpu_counter_alloc(alloc_state, &e->counters))  in find_check_entry()
    584  xt_percpu_counter_free(&e->counters);  in find_check_entry()
    647  /* Clear counters and comefrom */  in check_entry_size_and_hooks()
    648  e->counters …  in check_entry_size_and_hooks()
    757  get_counters(const struct xt_table_info *t, struct xt_counters counters[])  (argument)
    787  get_old_counters(const struct xt_table_info *t, struct xt_counters counters[])  (argument)
    808  struct xt_counters *counters;  in alloc_counters() (local)
    832  struct xt_counters *counters;  in copy_entries_to_user() (local)
    1060  struct xt_counters *counters;  in __do_replace() (local)
    1229  compat_uptr_t counters; /* struct xt_counters * */  (member)
    1235  compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr, unsigned int *size, struct xt_counters *counters, unsigned int i)  (argument)
    1562  struct xt_counters *counters;  in compat_copy_entries_to_user() (local)
    [all …]
/linux/include/linux/
  alloc_tag.h
    30  struct alloc_tag_counters __percpu *counters;  (member)
    103  .counters = &_shared_alloc_tag };
    113  .counters = NULL };
    122  .counters = &_alloc_tag_cntr };
    144  counter = per_cpu_ptr(tag->counters, cpu);  in alloc_tag_read()
    193  this_cpu_inc(tag->counters->calls);  in alloc_tag_ref_set()
    200  this_cpu_add(tag->counters->bytes, bytes);  in alloc_tag_add()
    218  this_cpu_sub(tag->counters->bytes, bytes);  in alloc_tag_sub()
    219  this_cpu_dec(tag->counters->calls);  in alloc_tag_sub()
/linux/Documentation/core-api/
  local_ops.rst
    30  counters. They minimize the performance cost of standard atomic operations by
    34  Having fast per CPU atomic counters is interesting in many cases: it does not
    36  coherent counters in NMI handlers. It is especially useful for tracing purposes
    37  and for various performance monitoring counters.
    95  static DEFINE_PER_CPU(local_t, counters) = LOCAL_INIT(0);
    107  local_inc(&get_cpu_var(counters));
    108  put_cpu_var(counters);
    113  local_inc(this_cpu_ptr(&counters));
    117  Reading the counters
    120  Those local counters can be read from foreign CPUs to sum the count. Note that
    [all …]
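The two local_ops.rst excerpts (the zh_CN hit above quotes the same sample) can be reassembled into one compact sketch of the pattern the document teaches: a local_t per CPU, incremented cheaply on the owning CPU, summed from any CPU with local_read(). This is pieced together from the quoted fragments, not a verbatim copy of the file:

    #include <linux/percpu.h>
    #include <linux/cpumask.h>
    #include <asm/local.h>

    static DEFINE_PER_CPU(local_t, counters) = LOCAL_INIT(0);

    static void count_event(void)
    {
            /* get_cpu_var() disables preemption so the local_t cannot
             * migrate under us; local_inc() is then a cheap increment
             * with no LOCK prefix on x86. */
            local_inc(&get_cpu_var(counters));
            put_cpu_var(counters);
    }

    static long sum_events(void)
    {
            long sum = 0;
            int cpu;

            /* Foreign-CPU reads are not serialized against the writers,
             * so the total is approximate, which suits statistics. */
            for_each_online_cpu(cpu)
                    sum += local_read(&per_cpu(counters, cpu));
            return sum;
    }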
/linux/drivers/net/wireless/intel/iwlwifi/mld/
  low_latency.c
    44  struct iwl_mld_low_latency_packets_counters *counters =  in iwl_mld_calc_low_latency() (local)
    47  spin_lock_bh(&counters->lock);  in iwl_mld_calc_low_latency()
    49  total_vo_vi_pkts += counters->vo_vi[mac_id];  in iwl_mld_calc_low_latency()
    52  counters->vo_vi[mac_id] = 0;  in iwl_mld_calc_low_latency()
    54  spin_unlock_bh(&counters->lock);  in iwl_mld_calc_low_latency()
    267  struct iwl_mld_low_latency_packets_counters *counters;  in iwl_mld_low_latency_update_counters() (local)
    275  if (WARN_ON_ONCE(fw_id >= ARRAY_SIZE(counters->vo_vi) ||  in iwl_mld_low_latency_update_counters()
    285  counters = &mld->low_latency.pkts_counters[queue];  in iwl_mld_low_latency_update_counters()
    287  spin_lock_bh(&counters->lock);  in iwl_mld_low_latency_update_counters()
    288  counters->vo_vi[fw_id]++;  in iwl_mld_low_latency_update_counters()
    [all …]
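The driver counts VO/VI packets per MAC id under a spinlock and, when evaluating low latency, sums and zeroes the counters so each window starts fresh. A user-space sketch of that snapshot-and-reset idiom with a pthread mutex; names are illustrative, and the driver's per-RX-queue split is omitted:

    #include <pthread.h>
    #include <stdio.h>

    #define NIDS 4

    struct pkt_counters {
            pthread_mutex_t lock;
            unsigned int vo_vi[NIDS]; /* per-interface packet counts */
    };

    /* Producer side: count a packet for one interface id. */
    static void count_pkt(struct pkt_counters *c, int id)
    {
            pthread_mutex_lock(&c->lock);
            c->vo_vi[id]++;
            pthread_mutex_unlock(&c->lock);
    }

    /* Consumer side: snapshot the totals and reset, so each evaluation
     * window starts from zero, like the sum-and-clear the driver does
     * under spin_lock_bh(). */
    static unsigned int drain_pkts(struct pkt_counters *c)
    {
            unsigned int total = 0;

            pthread_mutex_lock(&c->lock);
            for (int id = 0; id < NIDS; id++) {
                    total += c->vo_vi[id];
                    c->vo_vi[id] = 0;
            }
            pthread_mutex_unlock(&c->lock);
            return total;
    }

    int main(void)
    {
            struct pkt_counters c = { .lock = PTHREAD_MUTEX_INITIALIZER };

            count_pkt(&c, 0);
            count_pkt(&c, 2);
            printf("window 1: %u\n", drain_pkts(&c)); /* 2 */
            printf("window 2: %u\n", drain_pkts(&c)); /* 0 */
            return 0;
    }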
/linux/drivers/accessibility/speakup/
  keyhelp.c
    51  u_char *kp, counters[MAXFUNCS], ch, ch1;  in build_key_data() (local)
    56  memset(counters, 0, sizeof(counters));  in build_key_data()
    66  counters[*kp]++;  in build_key_data()
    70  if (counters[i] == 0)  in build_key_data()
    73  offset += (counters[i] + 1);  in build_key_data()
    90  counters[ch1]--;  in build_key_data()
    94  p_key = key_data + offset + counters[ch1];  in build_key_data()
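build_key_data() follows the classic histogram-then-offset pattern: count how many entries fall under each key, convert the counts into running bucket offsets, then place entries by decrementing the per-key count. That is the core of a counting sort. A self-contained sketch of the technique, simplified (keyhelp.c additionally reserves one extra slot per bucket, which is skipped here):

    #include <stdio.h>
    #include <string.h>

    #define NKEYS 8

    int main(void)
    {
            const unsigned char items[] = { 3, 1, 3, 0, 1, 3 };
            const size_t n = sizeof(items);
            unsigned char sorted[sizeof(items)];
            size_t counters[NKEYS], offset[NKEYS], off = 0;

            /* Pass 1: histogram of keys, as build_key_data() does first. */
            memset(counters, 0, sizeof(counters));
            for (size_t i = 0; i < n; i++)
                    counters[items[i]]++;

            /* Pass 2: running offsets; each bucket starts where the
             * previous buckets end. */
            for (int k = 0; k < NKEYS; k++) {
                    offset[k] = off;
                    off += counters[k];
            }

            /* Pass 3: place each item, consuming its bucket's count,
             * like the counters[ch1]-- placement above. */
            for (size_t i = n; i-- > 0;) {
                    unsigned char k = items[i];

                    counters[k]--;
                    sorted[offset[k] + counters[k]] = k;
            }

            for (size_t i = 0; i < n; i++)
                    printf("%u ", sorted[i]); /* prints 0 1 1 3 3 3 */
            printf("\n");
            return 0;
    }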
/linux/drivers/net/wireless/silabs/wfx/
  debug.c
    65  struct wfx_hif_mib_extended_count_table counters[3];  in wfx_counters_show() (local)
    67  for (i = 0; i < ARRAY_SIZE(counters); i++) {  in wfx_counters_show()
    68  ret = wfx_hif_get_counters_table(wdev, i, counters + i);  in wfx_counters_show()
    79  le32_to_cpu(counters[2].count_##name), \  in wfx_counters_show()
    80  le32_to_cpu(counters[0].count_##name), \  in wfx_counters_show()
    81  le32_to_cpu(counters[1].count_##name))  in wfx_counters_show()
    117  for (i = 0; i < ARRAY_SIZE(counters[0].reserved); i++)  in wfx_counters_show()
    119  le32_to_cpu(counters[2].reserved[i]),  in wfx_counters_show()
    120  le32_to_cpu(counters[0].reserved[i]),  in wfx_counters_show()
    121  le32_to_cpu(counters[1].reserved[i]));  in wfx_counters_show()
/linux/drivers/scsi/elx/efct/
  efct_xport.c
    105  struct efct_hw_link_stat_counts *counters, void *arg)  in efct_xport_link_stats_cb() (argument)
    110  counters[EFCT_HW_LINK_STAT_LINK_FAILURE_COUNT].counter;  in efct_xport_link_stats_cb()
    112  counters[EFCT_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter;  in efct_xport_link_stats_cb()
    114  counters[EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter;  in efct_xport_link_stats_cb()
    116  counters[EFCT_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter;  in efct_xport_link_stats_cb()
    118  counters[EFCT_HW_LINK_STAT_CRC_COUNT].counter;  in efct_xport_link_stats_cb()
    125  struct efct_hw_host_stat_counts *counters, void *arg)  in efct_xport_host_stats_cb() (argument)
    130  counters[EFCT_HW_HOST_STAT_TX_KBYTE_COUNT].counter;  in efct_xport_host_stats_cb()
    132  counters[EFCT_HW_HOST_STAT_RX_KBYTE_COUNT].counter;  in efct_xport_host_stats_cb()
    134  counters[EFCT_HW_HOST_STAT_TX_FRAME_COUNT].counter;  in efct_xport_host_stats_cb()
    [all …]
/linux/tools/perf/
  design.txt
    5  Performance counters are special hardware registers available on most modern
    13  hardware capabilities. It provides per task and per CPU counters, counter
    15  provides "virtual" 64-bit counters, regardless of the width of the
    16  underlying hardware counters.
    18  Performance counters are accessed via special file descriptors.
    32  Multiple counters can be kept open at a time, and the counters
    130  * Special "software" counters provided by the kernel, even if the hardware
    131  * does not support performance counters. These counters measure various
    152  Counters come in two flavours: counting counters and sampling
    153  counters. A "counting" counter is one that is used for counting the
    [all …]
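design.txt says counters are opened as file descriptors and always read back as virtual 64-bit values. The canonical user-space pattern, as in the perf_event_open(2) man page, opens one counting counter for retired instructions on the current task, enables it around a workload, and reads the total; error handling is trimmed:

    #include <linux/perf_event.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            struct perf_event_attr attr;
            uint64_t count;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.type = PERF_TYPE_HARDWARE;
            attr.size = sizeof(attr);
            attr.config = PERF_COUNT_HW_INSTRUCTIONS;
            attr.disabled = 1;
            attr.exclude_kernel = 1;

            /* pid == 0, cpu == -1: count this task on any CPU. */
            fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
            if (fd < 0) {
                    perror("perf_event_open");
                    return 1;
            }

            ioctl(fd, PERF_EVENT_IOC_RESET, 0);
            ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
            for (volatile int i = 0; i < 1000000; i++)
                    ; /* the workload under measurement */
            ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

            /* always a u64, whatever the hardware counter width */
            read(fd, &count, sizeof(count));
            printf("instructions: %llu\n", (unsigned long long)count);
            close(fd);
            return 0;
    }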
/linux/Documentation/admin-guide/perf/
  thunderx2-pmu.rst
    13  The DMC and L3C support up to 4 counters, while the CCPI2 supports up to 8
    14  counters. Counters are independently programmable to different events and
    15  can be started and stopped individually. None of the counters support an
    16  overflow interrupt. DMC and L3C counters are 32-bit and read every 2 seconds.
    17  The CCPI2 counters are 64-bit and assumed not to overflow in normal operation.
  alibaba_pmu.rst
    23  Each sub-channel has 36 PMU counters in total, which is classified into
    26  - Group 0: PMU Cycle Counter. This group has one pair of counters
    30  - Group 1: PMU Bandwidth Counters. This group has 8 counters that are used
    32  selected rank, or four ranks separately in the first 4 counters. The base
    35  - Group 2: PMU Retry Counters. This group has 10 counters, that intend to
    38  - Group 3: PMU Common Counters. This group has 16 counters, that are used
    41  For now, the Driveway PMU driver only uses counters in group 0 and group 3.
/linux/Documentation/arch/powerpc/
  imc.rst
    17  IMC (In-Memory collection counters) is a hardware monitoring facility that
    21  The Nest PMU counters are handled by a Nest IMC microcode which runs in the OCC
    25  The Core and Thread IMC PMU counters are handled in the core. Core level PMU
    26  counters give us the IMC counters' data per core and thread level PMU counters
    27  give us the IMC counters' data per CPU thread.
    51  The kernel discovers the IMC counters information in the device tree at the
    52  `imc-counters` device node which has a compatible field
    53  `ibm,opal-in-memory-counters`. From the device tree, the kernel parses the PMUs
/linux/tools/perf/tests/shell/
  stat_bpf_counters_cgrp.sh
    15  if ! perf stat -a --bpf-counters --for-each-cgroup / true > /dev/null 2>&1; then
    18  perf --no-pager stat -a --bpf-counters --for-each-cgroup / true || true
    51  …check_system_wide_counted_output=$(perf stat -a --bpf-counters --for-each-cgroup ${test_cgroups} -…
/linux/mm/kfence/
  core.c
    186  static atomic_long_t counters[KFENCE_COUNTER_COUNT];  (variable)
    205  return atomic_long_read(&counters[KFENCE_COUNTER_ALLOCATED]) > thresh;  in should_skip_covered()
    341  atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);  in check_canary_byte()
    435  atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_CAPACITY]);  in kfence_guarded_alloc()
    509  atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);  in kfence_guarded_alloc()
    510  atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCS]);  in kfence_guarded_alloc()
    525  atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);  in kfence_guarded_free()
    576  atomic_long_dec(&counters[KFENCE_COUNTER_ALLOCATED]);  in kfence_guarded_free()
    577  atomic_long_inc(&counters[KFENCE_COUNTER_FREES]);  in kfence_guarded_free()
    580  atomic_long_inc(&counters[KFENCE_COUNTER_ZOMBIES]);  in kfence_guarded_free()
    [all …]
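stackdepot (earlier) and KFENCE share the same statistics idiom: a fixed array of atomic counters indexed by an event enum, paired with a name table for the stats output. A user-space C11 sketch of the idiom; enum values and names are illustrative, not KFENCE's:

    #include <stdatomic.h>
    #include <stdio.h>

    enum stat_counter {
            STAT_ALLOCS,
            STAT_FREES,
            STAT_BUGS,
            STAT_COUNT, /* number of counters; keep last */
    };

    static const char *const stat_names[STAT_COUNT] = {
            "allocations", "frees", "bugs",
    };

    static atomic_long counters[STAT_COUNT];

    static void stat_inc(enum stat_counter c)
    {
            /* relaxed ordering is enough for statistics; the kernel's
             * atomic_long_inc() makes the same trade-off */
            atomic_fetch_add_explicit(&counters[c], 1, memory_order_relaxed);
    }

    int main(void)
    {
            stat_inc(STAT_ALLOCS);
            stat_inc(STAT_ALLOCS);
            stat_inc(STAT_FREES);
            for (int i = 0; i < STAT_COUNT; i++)
                    printf("%s: %ld\n", stat_names[i],
                           atomic_load_explicit(&counters[i],
                                                memory_order_relaxed));
            return 0;
    }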