
Searched refs:per_cpu (Results 1 – 25 of 309) sorted by relevance
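
For context on these results: per_cpu(var, cpu) names the copy of a DEFINE_PER_CPU variable that belongs to a given CPU. A minimal sketch of the idiom, using hypothetical demo_* names rather than anything from the files below:

    #include <linux/percpu.h>

    /* One instance of 'demo_state' exists for every possible CPU. */
    static DEFINE_PER_CPU(int, demo_state);

    /* per_cpu(var, cpu) is an lvalue naming CPU 'cpu's copy of 'var';
     * unlike this_cpu_*() it can reach a remote CPU's copy, so the
     * caller is responsible for serializing against that CPU. */
    static void demo_reset(unsigned int cpu)
    {
        per_cpu(demo_state, cpu) = -1;
    }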


/linux/arch/x86/xen/
smp.c
34 kfree(per_cpu(xen_resched_irq, cpu).name); in xen_smp_intr_free()
35 per_cpu(xen_resched_irq, cpu).name = NULL; in xen_smp_intr_free()
36 if (per_cpu(xen_resched_irq, cpu).irq >= 0) { in xen_smp_intr_free()
37 unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL); in xen_smp_intr_free()
38 per_cpu(xen_resched_irq, cpu).irq = -1; in xen_smp_intr_free()
40 kfree(per_cpu(xen_callfunc_irq, cpu).name); in xen_smp_intr_free()
41 per_cpu(xen_callfunc_irq, cpu).name = NULL; in xen_smp_intr_free()
42 if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) { in xen_smp_intr_free()
43 unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL); in xen_smp_intr_free()
44 per_cpu(xen_callfunc_irq, cpu).irq = -1; in xen_smp_intr_free()
[all …]
spinlock.c
24 int irq = per_cpu(lock_kicker_irq, cpu); in xen_qlock_kick()
73 WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n", in xen_init_lock_cpu()
74 cpu, per_cpu(lock_kicker_irq, cpu)); in xen_init_lock_cpu()
77 per_cpu(irq_name, cpu) = name; in xen_init_lock_cpu()
87 per_cpu(lock_kicker_irq, cpu) = irq; in xen_init_lock_cpu()
100 kfree(per_cpu(irq_name, cpu)); in xen_uninit_lock_cpu()
101 per_cpu(irq_name, cpu) = NULL; in xen_uninit_lock_cpu()
106 irq = per_cpu(lock_kicker_irq, cpu); in xen_uninit_lock_cpu()
111 per_cpu(lock_kicker_irq, cpu) = -1; in xen_uninit_lock_cpu()
smp_pv.c
98 kfree(per_cpu(xen_irq_work, cpu).name); in xen_smp_intr_free_pv()
99 per_cpu(xen_irq_work, cpu).name = NULL; in xen_smp_intr_free_pv()
100 if (per_cpu(xen_irq_work, cpu).irq >= 0) { in xen_smp_intr_free_pv()
101 unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL); in xen_smp_intr_free_pv()
102 per_cpu(xen_irq_work, cpu).irq = -1; in xen_smp_intr_free_pv()
105 kfree(per_cpu(xen_pmu_irq, cpu).name); in xen_smp_intr_free_pv()
106 per_cpu(xen_pmu_irq, cpu).name = NULL; in xen_smp_intr_free_pv()
107 if (per_cpu(xen_pmu_irq, cpu).irq >= 0) { in xen_smp_intr_free_pv()
108 unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL); in xen_smp_intr_free_pv()
109 per_cpu(xen_pmu_irq, cpu).irq = -1; in xen_smp_intr_free_pv()
[all …]
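
The three /linux/arch/x86/xen/ hits above share one teardown idiom: each interrupt slot is a per-CPU struct holding an IRQ number and a kmalloc'd name; the free path releases the name, unbinds the handler, and resets both fields so it can safely run more than once. A hedged sketch of that shape (the demo_* names are assumptions modeled on the snippets, not taken from the tree):

    #include <linux/percpu.h>
    #include <linux/slab.h>
    #include <xen/events.h>        /* unbind_from_irqhandler() */

    struct demo_irq_info {
        int irq;        /* -1 while unbound */
        char *name;     /* heap-allocated, NULL once freed */
    };
    static DEFINE_PER_CPU(struct demo_irq_info, demo_irq);

    static void demo_irq_free(unsigned int cpu)
    {
        kfree(per_cpu(demo_irq, cpu).name);
        per_cpu(demo_irq, cpu).name = NULL;
        if (per_cpu(demo_irq, cpu).irq >= 0) {
            unbind_from_irqhandler(per_cpu(demo_irq, cpu).irq, NULL);
            per_cpu(demo_irq, cpu).irq = -1;
        }
    }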
/linux/drivers/irqchip/
irq-armada-370-xp.c
172 void __iomem *per_cpu; member
220 writel(hwirq, mpic->per_cpu + MPIC_INT_SET_MASK); in mpic_irq_mask()
231 writel(hwirq, mpic->per_cpu + MPIC_INT_CLEAR_MASK); in mpic_irq_unmask()
314 reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_msi_reenable_percpu()
316 writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_msi_reenable_percpu()
319 writel(1, mpic->per_cpu + MPIC_INT_CLEAR_MASK); in mpic_msi_reenable_percpu()
369 writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK); in mpic_msi_init()
397 writel(MPIC_INT_CAUSE_PERF(cpuid), mpic->per_cpu + MPIC_INT_FABRIC_MASK); in mpic_perf_init()
406 reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_ipi_mask()
408 writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_ipi_mask()
[all …]
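
Note on irq-armada-370-xp.c: here per_cpu is not the percpu macro at all. Line 172 shows it is a struct member (void __iomem *per_cpu), the MPIC's per-CPU register window, so the readl()/writel() calls above are ordinary MMIO through that pointer.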
/linux/arch/arm/mm/
context.c
67 asid = per_cpu(active_asids, cpu).counter; in a15_erratum_get_cpumask()
69 asid = per_cpu(reserved_asids, cpu); in a15_erratum_get_cpumask()
144 asid = atomic64_xchg(&per_cpu(active_asids, i), 0); in flush_context()
153 asid = per_cpu(reserved_asids, i); in flush_context()
155 per_cpu(reserved_asids, i) = asid; in flush_context()
180 if (per_cpu(reserved_asids, cpu) == asid) { in check_update_reserved_asid()
182 per_cpu(reserved_asids, cpu) = newasid; in check_update_reserved_asid()
254 && atomic64_xchg(&per_cpu(active_asids, cpu), asid)) in check_and_switch_context()
270 atomic64_set(&per_cpu(active_asids, cpu), asid); in check_and_switch_context()
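
context.c above is the classic ASID-rollover scheme: each CPU publishes its live ASID in an atomic per-CPU slot, and the flush path xchg's every slot to zero while parking still-valid values in a reserved table. A simplified sketch of the reserve step under those assumptions (demo_* names are illustrative):

    #include <linux/atomic.h>
    #include <linux/cpumask.h>
    #include <linux/percpu.h>
    #include <linux/types.h>

    static DEFINE_PER_CPU(atomic64_t, demo_active_asids);
    static DEFINE_PER_CPU(u64, demo_reserved_asids);

    static void demo_flush_context(void)
    {
        int i;

        for_each_possible_cpu(i) {
            /* Steal the live ASID; 0 means that CPU already rolled over. */
            u64 asid = atomic64_xchg(&per_cpu(demo_active_asids, i), 0);

            if (!asid)
                asid = per_cpu(demo_reserved_asids, i);
            per_cpu(demo_reserved_asids, i) = asid;
        }
    }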
proc-v7-bugs.c
71 if (per_cpu(harden_branch_predictor_fn, cpu)) in spectre_v2_install_workaround()
76 per_cpu(harden_branch_predictor_fn, cpu) = in spectre_v2_install_workaround()
82 per_cpu(harden_branch_predictor_fn, cpu) = in spectre_v2_install_workaround()
88 per_cpu(harden_branch_predictor_fn, cpu) = in spectre_v2_install_workaround()
95 per_cpu(harden_branch_predictor_fn, cpu) = in spectre_v2_install_workaround()
/linux/arch/x86/kernel/apic/
x2apic_cluster.c
58 struct cpumask *cmsk = per_cpu(cluster_masks, cpu); in __x2apic_send_IPI_mask()
110 struct cpumask **cpu_cmsk = &per_cpu(cluster_masks, cpu_i); in prefill_clustermask()
135 if (per_cpu(cluster_masks, cpu)) in alloc_clustermask()
150 cmsk = per_cpu(cluster_masks, cpu_i); in alloc_clustermask()
156 per_cpu(cluster_masks, cpu) = cmsk; in alloc_clustermask()
170 per_cpu(cluster_masks, cpu) = cmsk; in alloc_clustermask()
188 if (!zalloc_cpumask_var_node(&per_cpu(ipi_mask, cpu), GFP_KERNEL, node)) in x2apic_prepare_cpu()
196 struct cpumask *cmsk = per_cpu(cluster_masks, dead_cpu); in x2apic_dead_cpu()
200 free_cpumask_var(per_cpu(ipi_mask, dead_cpu)); in x2apic_dead_cpu()
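
x2apic_cluster.c keeps one cpumask pointer per CPU, allocated lazily and preserved across CPU hotplug. A hedged sketch of the check-then-allocate step (demo_* names are hypothetical):

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/percpu.h>
    #include <linux/slab.h>

    static DEFINE_PER_CPU(struct cpumask *, demo_cluster_mask);

    static int demo_alloc_clustermask(unsigned int cpu, int node)
    {
        if (per_cpu(demo_cluster_mask, cpu))
            return 0;    /* kept across hotplug; nothing to do */

        per_cpu(demo_cluster_mask, cpu) =
            kzalloc_node(sizeof(struct cpumask), GFP_KERNEL, node);
        return per_cpu(demo_cluster_mask, cpu) ? 0 : -ENOMEM;
    }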
/linux/arch/parisc/kernel/
irq.c
75 per_cpu(local_ack_eiem, cpu) &= ~mask; in cpu_ack_irq()
78 set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu)); in cpu_ack_irq()
90 per_cpu(local_ack_eiem, cpu) |= mask; in cpu_eoi_irq()
93 set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu)); in cpu_eoi_irq()
126 #define irq_stats(x) (&per_cpu(irq_stat, x))
318 return per_cpu(cpu_data, cpu).txn_addr; in txn_affinity_addr()
330 (!per_cpu(cpu_data, next_cpu).txn_addr || in txn_alloc_addr()
403 stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack; in stack_overflow_check()
406 last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu); in stack_overflow_check()
422 last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu); in stack_overflow_check()
[all …]
topology.c
40 per_cpu(cpu_devices, cpuid).hotpluggable = 1; in store_cpu_topology()
42 if (register_cpu(&per_cpu(cpu_devices, cpuid), cpuid)) in store_cpu_topology()
49 p = &per_cpu(cpu_data, cpuid); in store_cpu_topology()
51 const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu); in store_cpu_topology()
smp.c
123 struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu); in ipi_interrupt()
128 spinlock_t *lock = &per_cpu(ipi_lock, this_cpu); in ipi_interrupt()
199 struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu); in ipi_send()
200 spinlock_t *lock = &per_cpu(ipi_lock, cpu); in ipi_send()
335 const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid); in smp_boot_one_cpu()
342 memset(&per_cpu(irq_stat, cpuid), 0, sizeof(irq_cpustat_t)); in smp_boot_one_cpu()
421 spin_lock_init(&per_cpu(ipi_lock, cpu)); in smp_prepare_cpus()
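
The parisc smp.c hits show the usual bring-up pattern for per-CPU locks: every possible CPU's copy is initialized once at boot, after which a sender takes the target CPU's lock through per_cpu() before poking it. An illustrative sketch (demo_* names are not from the file):

    #include <linux/percpu.h>
    #include <linux/spinlock.h>

    static DEFINE_PER_CPU(spinlock_t, demo_ipi_lock);

    /* Run once at boot, before any cross-CPU sender can take a lock. */
    static void demo_prepare_cpus(void)
    {
        int cpu;

        for_each_possible_cpu(cpu)
            spin_lock_init(&per_cpu(demo_ipi_lock, cpu));
    }

    /* A sender locks the *target* CPU's copy, not its own. */
    static void demo_send_ipi(int cpu)
    {
        spinlock_t *lock = &per_cpu(demo_ipi_lock, cpu);
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        /* ... record the pending operation for 'cpu' here ... */
        spin_unlock_irqrestore(lock, flags);
    }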
/linux/arch/x86/mm/
cpu_entry_area.c
25 return per_cpu(_cea_offset, cpu); in cea_offset()
35 per_cpu(_cea_offset, i) = i; in init_cea_offsets()
56 per_cpu(_cea_offset, i) = cea; in init_cea_offsets()
117 cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages, in percpu_setup_debug_store()
147 per_cpu(cea_exception_stacks, cpu) = &cea->estacks; in percpu_setup_exception_stacks()
172 &per_cpu(doublefault_stack, cpu), 1, PAGE_KERNEL); in percpu_setup_exception_stacks()
233 cea_map_percpu_pages(&cea->tss, &per_cpu(cpu_tss_rw, cpu), in setup_cpu_entry_area()
237 per_cpu(cpu_entry_area, cpu) = cea; in setup_cpu_entry_area()
/linux/arch/riscv/mm/
context.c
51 if (per_cpu(reserved_context, cpu) == cntx) { in check_update_reserved_context()
53 per_cpu(reserved_context, cpu) = newcntx; in check_update_reserved_context()
73 cntx = atomic_long_xchg_relaxed(&per_cpu(active_context, i), 0); in __flush_context()
81 cntx = per_cpu(reserved_context, i); in __flush_context()
84 per_cpu(reserved_context, i) = cntx; in __flush_context()
168 old_active_cntx = atomic_long_read(&per_cpu(active_context, cpu)); in set_mm_asid()
171 atomic_long_cmpxchg_relaxed(&per_cpu(active_context, cpu), in set_mm_asid()
187 atomic_long_set(&per_cpu(active_context, cpu), cntx); in set_mm_asid()
/linux/arch/x86/kvm/vmx/
posted_intr.c
93 raw_spinlock_t *spinlock = &per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu); in vmx_vcpu_pi_load()
182 raw_spin_lock_nested(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu), in pi_enable_wakeup_handler()
185 &per_cpu(wakeup_vcpus_on_cpu, vcpu->cpu)); in pi_enable_wakeup_handler()
186 raw_spin_unlock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu)); in pi_enable_wakeup_handler()
256 struct list_head *wakeup_list = &per_cpu(wakeup_vcpus_on_cpu, cpu); in pi_wakeup_handler()
257 raw_spinlock_t *spinlock = &per_cpu(wakeup_vcpus_on_cpu_lock, cpu); in pi_wakeup_handler()
271 INIT_LIST_HEAD(&per_cpu(wakeup_vcpus_on_cpu, cpu)); in pi_init_cpu()
272 raw_spin_lock_init(&per_cpu(wakeup_vcpus_on_cpu_lock, cpu)); in pi_init_cpu()
/linux/kernel/
softirq.c
1040 per_cpu(tasklet_vec, cpu).tail = in softirq_init()
1041 &per_cpu(tasklet_vec, cpu).head; in softirq_init()
1042 per_cpu(tasklet_hi_vec, cpu).tail = in softirq_init()
1043 &per_cpu(tasklet_hi_vec, cpu).head; in softirq_init()
1080 if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) { in takeover_tasklets()
1081 *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head; in takeover_tasklets()
1082 __this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail); in takeover_tasklets()
1083 per_cpu(tasklet_vec, cpu).head = NULL; in takeover_tasklets()
1084 per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head; in takeover_tasklets()
1088 if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) { in takeover_tasklets()
[all …]
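
softirq.c keeps each CPU's tasklet queue as a singly linked list with a tail pointer: tail points at the last next field, so an empty list has tail == &head, which is exactly the test on line 1080. A sketch of the hotplug splice under those assumptions (demo_* names are hypothetical):

    #include <linux/percpu.h>

    struct demo_tasklet {
        struct demo_tasklet *next;
        /* ... payload ... */
    };

    struct demo_vec {
        struct demo_tasklet *head;
        struct demo_tasklet **tail;    /* == &head when the list is empty */
    };
    static DEFINE_PER_CPU(struct demo_vec, demo_vec);

    /* Runs on the surviving CPU, with softirqs disabled. */
    static void demo_takeover(unsigned int dead_cpu)
    {
        struct demo_vec *dead = &per_cpu(demo_vec, dead_cpu);

        if (&dead->head != dead->tail) {    /* dead CPU's list non-empty */
            *__this_cpu_read(demo_vec.tail) = dead->head;
            __this_cpu_write(demo_vec.tail, dead->tail);
            dead->head = NULL;
            dead->tail = &dead->head;
        }
    }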
/linux/arch/mips/kernel/
mips-cpc.c
77 spin_lock_init(&per_cpu(cpc_core_lock, cpu)); in mips_cpc_probe()
100 spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core), in mips_cpc_lock_other()
101 per_cpu(cpc_core_lock_flags, curr_core)); in mips_cpc_lock_other()
120 spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core), in mips_cpc_unlock_other()
121 per_cpu(cpc_core_lock_flags, curr_core)); in mips_cpc_unlock_other()
time.c
57 per_cpu(pcp_lpj_ref, cpu) = in cpufreq_callback()
59 per_cpu(pcp_lpj_ref_freq, cpu) = freq->old; in cpufreq_callback()
74 lpj = cpufreq_scale(per_cpu(pcp_lpj_ref, cpu), in cpufreq_callback()
75 per_cpu(pcp_lpj_ref_freq, cpu), in cpufreq_callback()
/linux/arch/riscv/kernel/
unaligned_access_speed.c
46 if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN) in check_unaligned_access()
114 per_cpu(misaligned_access_speed, cpu) = speed; in check_unaligned_access()
245 if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN) { in riscv_online_cpu()
248 per_cpu(misaligned_access_speed, cpu) = unaligned_scalar_speed_param; in riscv_online_cpu()
294 if (per_cpu(vector_misaligned_access, cpu) != RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN) in check_vector_unaligned_access()
372 per_cpu(vector_misaligned_access, cpu) = speed; in check_vector_unaligned_access()
396 per_cpu(vector_misaligned_access, cpu) = unaligned_vector_speed_param; in riscv_online_cpu_vec()
401 if (per_cpu(vector_misaligned_access, cpu) != RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN) in riscv_online_cpu_vec()
453 per_cpu(misaligned_access_speed, cpu) = unaligned_scalar_speed_param; in check_unaligned_access_all_cpus()
474 per_cpu(vector_misaligned_access, cpu) = unaligned_vector_speed_param; in check_unaligned_access_all_cpus()
irq.c
86 per_cpu(irq_shadow_call_stack_ptr, cpu) = in init_irq_scs()
100 per_cpu(irq_stack_ptr, cpu) = p; in init_irq_stacks()
112 per_cpu(irq_stack_ptr, cpu) = per_cpu(irq_stack, cpu); in init_irq_stacks()
/linux/drivers/xen/events/
events_2l.c
52 clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu))); in evtchn_2l_remove()
58 clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, old_cpu))); in evtchn_2l_bind_to_cpu()
59 set_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu))); in evtchn_2l_bind_to_cpu()
152 per_cpu(cpu_evtchn_mask, cpu)[idx] & in active_evtchns()
268 xen_ulong_t *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu); in xen_debug_interrupt()
280 v = per_cpu(xen_vcpu, i); in xen_debug_interrupt()
289 v = per_cpu(xen_vcpu, cpu); in xen_debug_interrupt()
353 memset(per_cpu(cpu_evtchn_mask, i), 0, sizeof(xen_ulong_t) * in evtchn_2l_resume()
359 memset(per_cpu(cpu_evtchn_mask, cpu), 0, sizeof(xen_ulong_t) * in evtchn_2l_percpu_deinit()
/linux/arch/powerpc/include/asm/
smp.h
116 return per_cpu(cpu_sibling_map, cpu); in cpu_sibling_mask()
121 return per_cpu(cpu_core_map, cpu); in cpu_core_mask()
126 return per_cpu(cpu_l2_cache_map, cpu); in cpu_l2_cache_mask()
131 return per_cpu(cpu_smallcore_map, cpu); in cpu_smallcore_mask()
145 return per_cpu(cpu_smallcore_map, cpu); in cpu_smt_mask()
147 return per_cpu(cpu_sibling_map, cpu); in cpu_smt_mask()
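
The powerpc smp.h hits wrap per_cpu() in small typed accessors so topology code never touches the raw per-CPU cpumasks; cpu_smt_mask() additionally selects the small-core map when that applies. The accessor pattern, sketched with hypothetical names:

    #include <linux/cpumask.h>
    #include <linux/percpu.h>

    static DEFINE_PER_CPU(cpumask_var_t, demo_sibling_map);

    static inline struct cpumask *demo_sibling_mask(int cpu)
    {
        return per_cpu(demo_sibling_map, cpu);
    }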
/linux/arch/arm/kernel/
smp.c
391 struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid); in smp_store_cpu_info()
487 bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy; in smp_cpus_done()
622 per_cpu(cpu_completion, cpu) = completion; in register_ipi_completion()
628 complete(per_cpu(cpu_completion, cpu)); in ipi_complete()
805 if (!per_cpu(l_p_j_ref, first)) { in cpufreq_callback()
807 per_cpu(l_p_j_ref, cpu) = in cpufreq_callback()
808 per_cpu(cpu_data, cpu).loops_per_jiffy; in cpufreq_callback()
809 per_cpu(l_p_j_ref_freq, cpu) = freq->old; in cpufreq_callback()
824 lpj = cpufreq_scale(per_cpu(l_p_j_ref, first), in cpufreq_callback()
825 per_cpu(l_p_j_ref_freq, first), freq->new); in cpufreq_callback()
[all …]
/linux/arch/arm64/kvm/
vmid.c
53 vmid = atomic64_xchg_relaxed(&per_cpu(active_vmids, cpu), 0); in flush_context()
57 vmid = per_cpu(reserved_vmids, cpu); in flush_context()
59 per_cpu(reserved_vmids, cpu) = vmid; in flush_context()
83 if (per_cpu(reserved_vmids, cpu) == vmid) { in check_update_reserved_vmid()
85 per_cpu(reserved_vmids, cpu) = newvmid; in check_update_reserved_vmid()
/linux/arch/powerpc/platforms/powernv/
subcore.c
155 while(per_cpu(split_state, i).step < step) in wait_for_sync_step()
196 per_cpu(split_state, cpu).step = SYNC_STEP_UNSPLIT; in unsplit_core()
230 split_core_secondary_loop(&per_cpu(split_state, cpu).step); in split_core()
262 per_cpu(split_state, smp_processor_id()).step = SYNC_STEP_FINISHED; in cpu_do_split()
320 while(per_cpu(split_state, cpu).step < SYNC_STEP_FINISHED) in cpu_update_split_mode()
355 state = &per_cpu(split_state, cpu); in set_subcores_per_core()
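
subcore.c uses a per-CPU split_state.step as a rendezvous: each CPU publishes its progress, and waiters spin until every CPU has reached the required step. A rough, simplified sketch (demo_* names assumed; the real file does more than this):

    #include <linux/compiler.h>    /* barrier() */
    #include <linux/cpumask.h>
    #include <linux/percpu.h>
    #include <linux/types.h>

    struct demo_split_state {
        u8 step;
    };
    static DEFINE_PER_CPU(struct demo_split_state, demo_split_state);

    /* Spin until every online CPU has published at least 'step'. */
    static void demo_wait_for_sync_step(int step)
    {
        int i;

        for_each_online_cpu(i)
            while (per_cpu(demo_split_state, i).step < step)
                barrier();    /* force the load to be redone */

        smp_mb();    /* order later accesses after the observed state */
    }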
/linux/arch/arm/mach-omap2/
omap-mpuss-lowpower.c
120 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); in set_cpu_wakeup_addr()
131 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); in scu_pwrst_prepare()
185 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); in l2x0_pwrst_prepare()
230 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu); in omap4_enter_lowpower()
317 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu); in omap4_hotplug_cpu()
375 pm_info = &per_cpu(omap4_pm_info, 0x0); in omap4_mpuss_init()
399 pm_info = &per_cpu(omap4_pm_info, 0x1); in omap4_mpuss_init()
/linux/kernel/locking/
qspinlock_stat.h
52 sum += per_cpu(lockevents[id], cpu); in lockevent_read()
60 kicks += per_cpu(EVENT_COUNT(pv_kick_unlock), cpu); in lockevent_read()
64 kicks += per_cpu(EVENT_COUNT(pv_kick_wake), cpu); in lockevent_read()
112 per_cpu(pv_kick_time, cpu) = start; in __pv_kick()
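
qspinlock_stat.h reads lock-event statistics by summing every CPU's copy of a counter; increments happen locally through this_cpu ops, so the sum is unsynchronized but cheap. Sketch with hypothetical names:

    #include <linux/cpumask.h>
    #include <linux/percpu.h>

    static DEFINE_PER_CPU(unsigned long, demo_event_count);

    /* Fast path elsewhere: this_cpu_inc(demo_event_count); */
    static unsigned long demo_event_read(void)
    {
        unsigned long sum = 0;
        int cpu;

        for_each_possible_cpu(cpu)
            sum += per_cpu(demo_event_count, cpu);

        return sum;    /* racy, but adequate for statistics */
    }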
