
Searched refs: per_cpu_ptr (Results 1 – 25 of 315) sorted by relevance
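
Every hit below uses the same accessor from <linux/percpu.h>: storage is allocated once with alloc_percpu(), and per_cpu_ptr(ptr, cpu) then returns the copy belonging to a given CPU. As a minimal sketch of that shared pattern (the my_counter type and function names are hypothetical, not taken from any file listed here):

	#include <linux/cpumask.h>
	#include <linux/errno.h>
	#include <linux/percpu.h>
	#include <linux/types.h>

	struct my_counter {
		u64 events;
	};

	static struct my_counter __percpu *counters;

	static int my_counters_init(void)
	{
		/* One instance of struct my_counter per possible CPU. */
		counters = alloc_percpu(struct my_counter);
		if (!counters)
			return -ENOMEM;
		return 0;
	}

	static u64 my_counters_sum(void)
	{
		u64 sum = 0;
		int cpu;

		/* Visit each CPU's private copy and aggregate. */
		for_each_possible_cpu(cpu)
			sum += per_cpu_ptr(counters, cpu)->events;
		return sum;
	}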


/linux/fs/xfs/
xfs_stats.h 167 per_cpu_ptr(xfsstats.xs_stats, current_cpu())->s.v++; \
168 per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->s.v++; \
173 per_cpu_ptr(xfsstats.xs_stats, current_cpu())->s.v--; \
174 per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->s.v--; \
179 per_cpu_ptr(xfsstats.xs_stats, current_cpu())->s.v += (inc); \
180 per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->s.v += (inc); \
185 per_cpu_ptr(xfsstats.xs_stats, current_cpu())->a[off]++; \
186 per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->a[off]++; \
191 per_cpu_ptr(xfsstats.xs_stats, current_cpu())->a[off]; \
192 per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->a[off]; \
[all …]
xfs_stats.c 15 val += *(((__u32 *)per_cpu_ptr(stats, cpu) + idx)); in counter_val()
73 xs_xstrat_bytes += per_cpu_ptr(stats, i)->s.xs_xstrat_bytes; in xfs_stats_format()
74 xs_write_bytes += per_cpu_ptr(stats, i)->s.xs_write_bytes; in xfs_stats_format()
75 xs_read_bytes += per_cpu_ptr(stats, i)->s.xs_read_bytes; in xfs_stats_format()
76 defer_relog += per_cpu_ptr(stats, i)->s.defer_relog; in xfs_stats_format()
102 vn_active = per_cpu_ptr(stats, c)->s.vn_active; in xfs_stats_clearall()
103 memset(per_cpu_ptr(stats, c), 0, sizeof(*stats)); in xfs_stats_clearall()
104 per_cpu_ptr(stats, c)->s.vn_active = vn_active; in xfs_stats_clearall()
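
xfs_stats_clearall() above shows a clear-with-carry-over variant: every CPU's copy is zeroed, but vn_active is preserved because it counts live objects rather than accumulated events. A hedged sketch of the same idea, using a hypothetical my_stats type:

	#include <linux/cpumask.h>
	#include <linux/percpu.h>
	#include <linux/string.h>
	#include <linux/types.h>

	struct my_stats {
		u32 active;	/* live objects: must survive a reset */
		u32 reads;	/* accumulated events: cleared on reset */
		u32 writes;
	};

	static void my_stats_clearall(struct my_stats __percpu *stats)
	{
		u32 active;
		int c;

		for_each_possible_cpu(c) {
			/* Save the live count, wipe the copy, restore it. */
			active = per_cpu_ptr(stats, c)->active;
			memset(per_cpu_ptr(stats, c), 0, sizeof(struct my_stats));
			per_cpu_ptr(stats, c)->active = active;
		}
	}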
/linux/drivers/infiniband/ulp/rtrs/
rtrs-clt-stats.c 27 s = per_cpu_ptr(stats->pcpu_stats, con->cpu); in rtrs_clt_update_wc_stats()
47 s = per_cpu_ptr(stats->pcpu_stats, cpu); in rtrs_clt_stats_migration_from_cnt_to_str()
66 s = per_cpu_ptr(stats->pcpu_stats, cpu); in rtrs_clt_stats_migration_to_cnt_to_str()
90 r = &per_cpu_ptr(stats->pcpu_stats, cpu)->rdma; in rtrs_clt_stats_rdma_to_str()
119 s = per_cpu_ptr(stats->pcpu_stats, cpu); in rtrs_clt_reset_rdma_stats()
135 s = per_cpu_ptr(stats->pcpu_stats, cpu); in rtrs_clt_reset_cpu_migr_stats()
rtrs-srv-stats.c 21 r = per_cpu_ptr(stats->rdma_stats, cpu); in rtrs_srv_reset_rdma_stats()
40 r = per_cpu_ptr(stats->rdma_stats, cpu); in rtrs_srv_stats_rdma_to_str()
/linux/kernel/
smpboot.c 172 struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu); in __smpboot_create_thread()
197 *per_cpu_ptr(ht->store, cpu) = tsk; in __smpboot_create_thread()
230 struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu); in smpboot_unpark_thread()
249 struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu); in smpboot_park_thread()
272 struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu); in smpboot_destroy_threads()
276 *per_cpu_ptr(ht->store, cpu) = NULL; in smpboot_destroy_threads()
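
In smpboot.c, ht->store is a per-CPU slot that holds a task_struct pointer, so each use dereferences the result of per_cpu_ptr() once more: *per_cpu_ptr(ht->store, cpu). A sketch of that pointer-slot pattern (worker_store and the functions around it are hypothetical):

	#include <linux/errno.h>
	#include <linux/kthread.h>
	#include <linux/percpu.h>
	#include <linux/sched.h>

	/* One task_struct pointer per CPU, as in smpboot's ht->store. */
	static struct task_struct * __percpu *worker_store;

	static int worker_store_init(void)
	{
		worker_store = alloc_percpu(struct task_struct *);
		return worker_store ? 0 : -ENOMEM;
	}

	static void park_all_workers(void)
	{
		int cpu;

		for_each_possible_cpu(cpu) {
			/* The extra '*' fetches the pointer stored in this CPU's slot. */
			struct task_struct *tsk = *per_cpu_ptr(worker_store, cpu);

			if (tsk)
				kthread_park(tsk);
		}
	}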
relay.c 204 *per_cpu_ptr(chan->buf, buf->cpu) = NULL; in relay_destroy_buf()
327 if (chan->is_global && (buf = *per_cpu_ptr(chan->buf, 0))) { in relay_reset()
334 if ((buf = *per_cpu_ptr(chan->buf, i))) in relay_reset()
382 return *per_cpu_ptr(chan->buf, 0); in relay_open_buf()
406 *per_cpu_ptr(chan->buf, 0) = buf; in relay_open_buf()
440 if (*per_cpu_ptr(chan->buf, cpu)) in relay_prepare_cpu()
448 *per_cpu_ptr(chan->buf, cpu) = buf; in relay_prepare_cpu()
520 *per_cpu_ptr(chan->buf, i) = buf; in relay_open()
529 if ((buf = *per_cpu_ptr(chan->buf, i))) in relay_open()
594 buf = *per_cpu_ptr(cha in relay_late_setup_files()
[all …]
cpu.c 173 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in cpuhp_invoke_callback()
311 atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu); in cpuhp_wait_for_sync_state()
362 atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu); in cpuhp_bp_sync_dead()
404 atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu); in cpuhp_can_boot_ap()
794 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in bringup_wait_for_ap_online()
827 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in cpuhp_bringup_ap()
859 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in bringup_cpu()
1128 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in cpuhp_invoke_ap_callback()
1177 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); in cpuhp_kick_ap_work()
1208 st = per_cpu_ptr(&cpuhp_state, cpu); in cpuhp_init_state()
[all …]
/linux/kernel/irq/
matrix.c 72 struct cpumap *cm = per_cpu_ptr(m->maps, cpu); in irq_alloc_matrix()
144 cm = per_cpu_ptr(m->maps, cpu); in matrix_find_best_cpu()
165 cm = per_cpu_ptr(m->maps, cpu); in matrix_find_best_cpu_managed()
221 struct cpumap *cm = per_cpu_ptr(m->maps, cpu); in irq_matrix_reserve_managed()
262 struct cpumap *cm = per_cpu_ptr(m->maps, cpu); in irq_matrix_remove_managed()
305 cm = per_cpu_ptr(m->maps, cpu); in irq_matrix_alloc_managed()
400 cm = per_cpu_ptr(m->maps, cpu); in irq_matrix_alloc()
427 struct cpumap *cm = per_cpu_ptr(m->maps, cpu); in irq_matrix_free()
510 struct cpumap *cm = per_cpu_ptr(m->maps, cpu); in irq_matrix_debug_show()
/linux/kernel/sched/
cpuacct.c 97 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); in cpuacct_cpuusage_read()
98 u64 *cpustat = per_cpu_ptr(ca->cpustat, cpu)->cpustat; in cpuacct_cpuusage_read()
137 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); in cpuacct_cpuusage_write()
138 u64 *cpustat = per_cpu_ptr(ca->cpustat, cpu)->cpustat; in cpuacct_cpuusage_write()
270 u64 *cpustat = per_cpu_ptr(ca->cpustat, cpu)->cpustat; in cpuacct_stats_show()
278 cputime.sum_exec_runtime += *per_cpu_ptr(ca->cpuusage, cpu); in cpuacct_stats_show()
342 *per_cpu_ptr(ca->cpuusage, cpu) += cputime; in cpuacct_charge()
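
cpuacct allocates bare u64 counters per CPU (ca->cpuusage), so per_cpu_ptr() returns a plain u64 * that is dereferenced directly, as in cpuacct_charge() above. The scalar flavor in a hedged sketch (names hypothetical):

	#include <linux/percpu.h>
	#include <linux/types.h>

	static u64 __percpu *usage;	/* e.g. usage = alloc_percpu(u64); */

	/*
	 * Charge 'delta' to one CPU's counter. As in cpuacct, the caller
	 * is responsible for serializing updates to that CPU's copy.
	 */
	static void charge_cpu(int cpu, u64 delta)
	{
		*per_cpu_ptr(usage, cpu) += delta;
	}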
topology.c 937 sibling = *per_cpu_ptr(sdd->sd, i); in build_balance_mask()
998 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); in init_overlap_sched_group()
1058 sibling = *per_cpu_ptr(sdd->sd, i); in build_overlap_sched_groups()
1208 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); in get_group()
1216 sg = *per_cpu_ptr(sdd->sg, cpu); in get_group()
1217 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); in get_group()
1539 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); in claim_allocations()
1540 *per_cpu_ptr(sdd->sd, cpu) = NULL; in claim_allocations()
1542 if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref)) in claim_allocations()
1543 *per_cpu_ptr(sd in claim_allocations()
[all …]
/linux/kernel/bpf/
percpu_freelist.c 15 struct pcpu_freelist_head *head = per_cpu_ptr(s->freelist, cpu); in pcpu_freelist_init()
66 head = per_cpu_ptr(s->freelist, cpu); in ___pcpu_freelist_push_nmi()
110 head = per_cpu_ptr(s->freelist, cpu); in pcpu_freelist_populate()
128 head = per_cpu_ptr(s->freelist, cpu); in ___pcpu_freelist_pop()
160 head = per_cpu_ptr(s->freelist, cpu); in ___pcpu_freelist_pop_nmi()
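
pcpu_freelist_init() above sets up one lock-protected list head per CPU by walking all possible CPUs. A minimal sketch of that layout with hypothetical types (the real BPF freelist uses its own pcpu_freelist_head, not a list_head):

	#include <linux/cpumask.h>
	#include <linux/errno.h>
	#include <linux/list.h>
	#include <linux/percpu.h>
	#include <linux/spinlock.h>

	struct pcpu_head {
		raw_spinlock_t lock;
		struct list_head list;
	};

	static struct pcpu_head __percpu *heads;

	static int heads_init(void)
	{
		int cpu;

		heads = alloc_percpu(struct pcpu_head);
		if (!heads)
			return -ENOMEM;

		/* Give every CPU's copy its own lock and an empty list. */
		for_each_possible_cpu(cpu) {
			struct pcpu_head *h = per_cpu_ptr(heads, cpu);

			raw_spin_lock_init(&h->lock);
			INIT_LIST_HEAD(&h->list);
		}
		return 0;
	}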
bpf_lru_list.c 413 l = per_cpu_ptr(lru->percpu_lru, cpu); in bpf_percpu_lru_pop_free()
446 loc_l = per_cpu_ptr(clru->local_list, cpu); in bpf_common_lru_pop_free()
475 steal_loc_l = per_cpu_ptr(clru->local_list, steal); in bpf_common_lru_pop_free()
520 loc_l = per_cpu_ptr(lru->common_lru.local_list, node->cpu); in bpf_common_lru_push_free()
547 l = per_cpu_ptr(lru->percpu_lru, node->cpu); in bpf_percpu_lru_push_free()
597 l = per_cpu_ptr(lru->percpu_lru, cpu); in bpf_percpu_lru_populate()
664 l = per_cpu_ptr(lru->percpu_lru, cpu); in bpf_lru_init()
678 loc_l = per_cpu_ptr(clru->local_list, cpu); in bpf_lru_init()
/linux/arch/x86/events/amd/
uncore.c 148 struct amd_uncore_ctx *ctx = *per_cpu_ptr(pmu->ctx, event->cpu); in amd_uncore_add()
195 struct amd_uncore_ctx *ctx = *per_cpu_ptr(pmu->ctx, event->cpu); in amd_uncore_del()
223 ctx = *per_cpu_ptr(pmu->ctx, event->cpu); in amd_uncore_event_init()
397 union amd_uncore_info *info = per_cpu_ptr(uncore->info, cpu); in amd_uncore_ctx_cid()
404 union amd_uncore_info *info = per_cpu_ptr(uncore->info, cpu); in amd_uncore_ctx_gid()
411 union amd_uncore_info *info = per_cpu_ptr(uncore->info, cpu); in amd_uncore_ctx_num_pmcs()
426 ctx = *per_cpu_ptr(pmu->ctx, cpu); in amd_uncore_ctx_free()
438 *per_cpu_ptr(pmu->ctx, cpu) = NULL; in amd_uncore_ctx_free()
456 *per_cpu_ptr(pmu->ctx, cpu) = NULL; in amd_uncore_ctx_init()
468 prev = *per_cpu_ptr(pmu->ctx, j); in amd_uncore_ctx_init()
[all …]
/linux/arch/x86/kernel/cpu/
aperfmperf.c 383 per_cpu_ptr(arch_cpu_scale, cpu)->capacity = SCHED_CAPACITY_SCALE; in arch_enable_hybrid_capacity_scale()
384 per_cpu_ptr(arch_cpu_scale, cpu)->freq_ratio = arch_max_freq_ratio; in arch_enable_hybrid_capacity_scale()
414 WRITE_ONCE(per_cpu_ptr(arch_cpu_scale, cpu)->capacity, in arch_set_cpu_capacity()
416 WRITE_ONCE(per_cpu_ptr(arch_cpu_scale, cpu)->freq_ratio, in arch_set_cpu_capacity()
426 return READ_ONCE(per_cpu_ptr(arch_cpu_scale, cpu)->capacity); in arch_scale_cpu_capacity()
503 struct aperfmperf *s = per_cpu_ptr(&cpu_samples, cpu); in arch_freq_get_on_cpu()
/linux/fs/squashfs/
decompressor_multi_percpu.c 39 stream = per_cpu_ptr(percpu, cpu); in squashfs_decompressor_create()
53 stream = per_cpu_ptr(percpu, cpu); in squashfs_decompressor_create()
70 stream = per_cpu_ptr(percpu, cpu); in squashfs_decompressor_destroy()
/linux/kernel/trace/
trace_functions_graph.c 179 data = per_cpu_ptr(tr->array_buffer.data, cpu); in trace_graph_entry()
261 data = per_cpu_ptr(tr->array_buffer.data, cpu); in trace_graph_return()
420 last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid); in verif_pid()
728 cpu_data = per_cpu_ptr(data->cpu_data, cpu); in print_graph_entry_leaf()
781 cpu_data = per_cpu_ptr(data->cpu_data, cpu); in print_graph_entry_nested()
885 depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq); in check_irq_entry()
931 depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq); in check_irq_return()
1018 cpu_data = per_cpu_ptr(data->cpu_data, cpu); in print_graph_return()
1089 depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth; in print_graph_comment()
1155 if (data && per_cpu_ptr(dat in print_graph_function_flags()
[all …]
trace_sched_wakeup.c 85 *data = per_cpu_ptr(tr->array_buffer.data, cpu); in func_prolog_preempt_disable()
462 disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled); in probe_wakeup_sched_switch()
476 data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu); in probe_wakeup_sched_switch()
499 atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled); in probe_wakeup_sched_switch()
554 disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled); in probe_wakeup()
586 data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu); in probe_wakeup()
601 atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled); in probe_wakeup()
/linux/drivers/irqchip/
irq-riscv-imsic-state.c 155 mlocal = per_cpu_ptr(imsic->global.local, mvec->cpu); in __imsic_local_sync()
241 lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu); in imsic_vector_mask()
263 lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu); in imsic_vector_unmask()
311 old_lpriv = per_cpu_ptr(imsic->lpriv, old_vec->cpu); in imsic_vector_move()
315 new_lpriv = per_cpu_ptr(imsic->lpriv, new_vec->cpu); in imsic_vector_move()
336 lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu); in imsic_vector_debug_show()
363 struct imsic_local_priv *lpriv = per_cpu_ptr(imsic->lpriv, cpu); in imsic_vector_from_local_id()
385 lpriv = per_cpu_ptr(imsic->lpriv, cpu); in imsic_vector_alloc()
410 lpriv = per_cpu_ptr(imsic->lpriv, cpu); in imsic_local_cleanup()
433 lpriv = per_cpu_ptr(imsic->lpriv, cpu); in imsic_local_init()
[all …]
irq-sifive-plic.c 123 struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu); in plic_irq_toggle()
252 priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv; in plic_irq_suspend()
261 struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu); in plic_irq_suspend()
284 priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv; in plic_irq_resume()
293 struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu); in plic_irq_resume()
611 handler = per_cpu_ptr(&plic_handlers, cpu); in plic_probe()
655 handler = per_cpu_ptr(&plic_handlers, cpu); in plic_probe()
694 handler = per_cpu_ptr(&plic_handlers, cpu); in plic_probe()
/linux/drivers/clocksource/
timer-mp-csky.c 78 struct timer_of *to = per_cpu_ptr(&csky_to, cpu); in csky_mptimer_starting_cpu()
145 to = per_cpu_ptr(&csky_to, cpu); in csky_mptimer_init()
168 to = per_cpu_ptr(&csky_to, cpu_rollback); in csky_mptimer_init()
/linux/drivers/powercap/
idle_inject.c 108 iit = per_cpu_ptr(&idle_inject_thread, cpu); in idle_inject_wakeup()
154 iit = per_cpu_ptr(&idle_inject_thread, cpu); in idle_inject_fn()
280 iit = per_cpu_ptr(&idle_inject_thread, cpu); in idle_inject_stop()
311 per_cpu_ptr(&idle_inject_thread, cpu); in idle_inject_should_run()
/linux/tools/testing/radix-tree/linux/
percpu.h
/linux/drivers/hv/
hv.c 108 hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu); in hv_synic_alloc()
120 hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu); in hv_synic_alloc()
212 per_cpu_ptr(hv_context.cpu_context, cpu); in hv_synic_free()
265 per_cpu_ptr(hv_context.cpu_context, cpu); in hv_synic_enable_regs()
348 per_cpu_ptr(hv_context.cpu_context, cpu); in hv_synic_disable_regs()
/linux/arch/x86/kernel/
kgdb.c 208 bp = *per_cpu_ptr(breakinfo[breakno].pev, cpu); in kgdb_correct_hw_break()
237 pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu); in hw_break_reserve_slot()
249 pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu); in hw_break_reserve_slot()
264 pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu); in hw_break_release_slot()
304 bp = *per_cpu_ptr(breakinfo[i].pev, cpu); in kgdb_remove_all_hw_break()
397 bp = *per_cpu_ptr(breakinfo[i].pev, cpu); in kgdb_disable_hw_debug()
666 pevent = per_cpu_ptr(breakinfo[i].pev, cpu); in kgdb_arch_late()
/linux/kernel/rcu/
tasks.h 271 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); in cblist_init_generic()
364 rtpcp = per_cpu_ptr(rtp->rtpcpu, chosen_cpu); in call_rcu_tasks_generic()
442 rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); in rcu_tasks_need_gpcb()
473 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); in rcu_tasks_need_gpcb()
528 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); in rcu_tasks_invoke_cbs()
627 rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0)); in rcu_tasks_kthread()
638 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); in synchronize_rcu_tasks_generic()
730 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
777 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); in rcu_tasks_wait_gp()
1050 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rcu_task in check_all_holdout_tasks()
[all …]
