/linux/net/netfilter/

nft_counter.c
     40  struct nft_counter *this_cpu;  in nft_counter_do_eval() local
     43  this_cpu = this_cpu_ptr(priv->counter);  in nft_counter_do_eval()
     47  u64_stats_add(&this_cpu->bytes, pkt->skb->len);  in nft_counter_do_eval()
     48  u64_stats_inc(&this_cpu->packets);  in nft_counter_do_eval()
     67  struct nft_counter *this_cpu;  in nft_counter_do_init() local
     73  this_cpu = raw_cpu_ptr(cpu_stats);  in nft_counter_do_init()
     75  u64_stats_set(&this_cpu->packets,  in nft_counter_do_init()
     79  u64_stats_set(&this_cpu->bytes,  in nft_counter_do_init()
    113  struct nft_counter *this_cpu;  in nft_counter_reset() local
    116  this_cpu = this_cpu_ptr(priv->counter);  in nft_counter_reset()
    [all …]

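These hits show the standard per-CPU statistics idiom: look up the current CPU's slot with this_cpu_ptr() and update it through the u64_stats helpers. A minimal sketch of the same pattern, assuming a hypothetical struct my_counter (the real nft_counter code additionally brackets updates with a u64_stats_sync sequence on 32-bit kernels, omitted here)::

    #include <linux/percpu.h>
    #include <linux/u64_stats_sync.h>

    /* Hypothetical counter pair mirroring the nft_counter layout. */
    struct my_counter {
            u64_stats_t bytes;
            u64_stats_t packets;
    };

    static void my_counter_update(struct my_counter __percpu *stats,
                                  unsigned int len)
    {
            /* Caller must keep preemption (or BH) disabled across this. */
            struct my_counter *this_cpu = this_cpu_ptr(stats);

            u64_stats_add(&this_cpu->bytes, len);
            u64_stats_inc(&this_cpu->packets);
    }

The matching per-CPU instance would come from alloc_percpu(struct my_counter) and be released with free_percpu().
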
/linux/Documentation/translations/zh_CN/core-api/

this_cpu_ops.rst
     16  this_cpu operations
     22  this_cpu operations are a way of optimizing access to the per-CPU variables associated with the currently executing processor. This is done through the use of segment reg…
     25  this_cpu operations add the offset of the per-CPU variable to the processor-specific per-CPU base address and encode the operation into the…
     42  The main use of this_cpu operations has been to optimize counter operations.
     44  The following this_cpu() operations with implied preemption protection are defined. They can be used without worrying about preemption or interrupts::
     63  Inner workings of this_cpu operations
     85  Consider the following this_cpu operation::
    105  …may move the process immediately before or after the this_cpu instruction is executed. In general, this means that each processor's individual counters…
    141  In the context of per-CPU operations, the expression above indicates that x is a per-CPU variable. Most this_cpu operations take a…
    180  If we do not later use ``this_cpu ops`` to operate on the field, then the calculation of the pointer may require the use of…
    [all …]

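The "implied preemption protection" the snippet mentions is easiest to see with a counter. A minimal sketch, assuming a hypothetical per-CPU event counter::

    #include <linux/percpu.h>

    static DEFINE_PER_CPU(unsigned long, my_events);

    void count_event(void)
    {
            /*
             * this_cpu_inc() locates the current CPU's instance and
             * increments it in one step that is safe against both
             * preemption and interrupts, so no get_cpu()/put_cpu()
             * bracketing is required.
             */
            this_cpu_inc(my_events);
    }
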
local_ops.rst
     24  Note that operations based on ``local_t`` are not recommended for general kernel use. Please use the ``this_cpu``…
     26  …been superseded by ``this_cpu`` operations. ``this_cpu`` operations combine, in a single instruction, the re…

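A brief illustration of the replacement this document recommends, using a hypothetical counter written in both styles::

    #include <asm/local.h>
    #include <linux/percpu.h>

    static DEFINE_PER_CPU(local_t, old_ctr);        /* legacy style */
    static DEFINE_PER_CPU(unsigned long, new_ctr);  /* preferred style */

    void bump_old(void)
    {
            local_t *c = &get_cpu_var(old_ctr);  /* disables preemption */

            local_inc(c);
            put_cpu_var(old_ctr);
    }

    void bump_new(void)
    {
            /* One step: locate this CPU's copy and increment it. */
            this_cpu_inc(new_ctr);
    }

On x86 the this_cpu form compiles down to a single segment-prefixed increment; that is the "single instruction" combination the truncated snippet refers to.
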
/linux/arch/parisc/kernel/

smp.c
    122  int this_cpu = smp_processor_id();  in ipi_interrupt() local
    123  struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu);  in ipi_interrupt()
    128  spinlock_t *lock = &per_cpu(ipi_lock, this_cpu);  in ipi_interrupt()
    146  smp_debug(100, KERN_DEBUG "CPU%d IPI_NOP\n", this_cpu);  in ipi_interrupt()
    150  smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu);  in ipi_interrupt()
    156  smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu);  in ipi_interrupt()
    162  smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu);  in ipi_interrupt()
    166  smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_STOP\n", this_cpu);  in ipi_interrupt()
    171  smp_debug(100, KERN_DEBUG "CPU%d is alive!\n", this_cpu);  in ipi_interrupt()
    175  smp_debug(100, KERN_DEBUG "CPU%d ENTER_KGDB\n", this_cpu);  in ipi_interrupt()
    [all …]

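ipi_interrupt() runs in interrupt context, where migration cannot happen, so pairing smp_processor_id() with per_cpu() indexing is safe. The same idiom, sketched with a hypothetical per-CPU structure::

    #include <linux/percpu.h>
    #include <linux/smp.h>

    struct my_cpuinfo {
            unsigned long ipi_count;
    };

    static DEFINE_PER_CPU(struct my_cpuinfo, my_cpu_data);

    static void my_ipi_handler(void)
    {
            int this_cpu = smp_processor_id();  /* IRQ context: no migration */
            struct my_cpuinfo *p = &per_cpu(my_cpu_data, this_cpu);

            p->ipi_count++;
    }
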
/linux/arch/sparc/kernel/

process_64.c
    192  int this_cpu)  in __global_reg_self() argument
    198  rp = &global_cpu_snapshot[this_cpu].reg;  in __global_reg_self()
    244  int this_cpu, cpu;  in arch_trigger_cpumask_backtrace() local
    251  this_cpu = raw_smp_processor_id();  in arch_trigger_cpumask_backtrace()
    255  if (cpumask_test_cpu(this_cpu, mask) && this_cpu != exclude_cpu)  in arch_trigger_cpumask_backtrace()
    256  __global_reg_self(tp, regs, this_cpu);  in arch_trigger_cpumask_backtrace()
    272  (cpu == this_cpu ? '*' : ' '), cpu,  in arch_trigger_cpumask_backtrace()
    309  static void __global_pmu_self(int this_cpu)  in __global_pmu_self() argument
    317  pp = &global_cpu_snapshot[this_cpu].pmu;  in __global_pmu_self()
    343  int this_cpu, cpu;  in pmu_snapshot_all_cpus() local
    [all …]

nmi.c
     75  int this_cpu = smp_processor_id();  in die_nmi() local
     82  panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);  in die_nmi()
     84  WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);  in die_nmi()

/linux/arch/alpha/kernel/

smp.c
    513  int this_cpu = smp_processor_id();  in handle_ipi() local
    514  unsigned long *pending_ipis = &ipi_data[this_cpu].bits;  in handle_ipi()
    519  this_cpu, *pending_ipis, regs->pc));  in handle_ipi()
    546  this_cpu, which);  in handle_ipi()
    554  cpu_data[this_cpu].ipi_count++;  in handle_ipi()
    642  int cpu, this_cpu = smp_processor_id();  in flush_tlb_mm() local
    644  if (!cpu_online(cpu) || cpu == this_cpu)  in flush_tlb_mm()
    689  int cpu, this_cpu = smp_processor_id();  in flush_tlb_page() local
    691  if (!cpu_online(cpu) || cpu == this_cpu)  in flush_tlb_page()
    743  int cpu, this_cpu = smp_processor_id();  in flush_icache_user_page() local
    [all …]

/linux/tools/power/cpupower/utils/idle_monitor/

cpuidle_sysfs.c
    145  int this_cpu;  in cpuidle_register() local
    147  this_cpu = sched_getcpu();  in cpuidle_register()
    150  cpuidle_sysfs_monitor.hw_states_num = cpuidle_state_count(this_cpu);  in cpuidle_register()
    156  tmp = cpuidle_state_name(this_cpu, num);  in cpuidle_register()
    165  tmp = cpuidle_state_desc(this_cpu, num);  in cpuidle_register()

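cpupower is userspace, so this_cpu here comes from glibc's sched_getcpu() rather than from kernel per-CPU machinery. A standalone sketch::

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
            int this_cpu = sched_getcpu();  /* -1 on failure */

            if (this_cpu < 0) {
                    perror("sched_getcpu");
                    return 1;
            }
            printf("running on CPU %d\n", this_cpu);
            return 0;
    }

The result is advisory: without affinity pinning, the scheduler may migrate the process immediately after the call.
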
/linux/arch/x86/kernel/

smp.c
    151  unsigned int old_cpu, this_cpu;  in native_stop_other_cpus() local
    159  this_cpu = smp_processor_id();  in native_stop_other_cpus()
    160  if (!atomic_try_cmpxchg(&stopping_cpu, &old_cpu, this_cpu))  in native_stop_other_cpus()
    191  cpumask_clear_cpu(this_cpu, &cpus_stop_mask);  in native_stop_other_cpus()

/linux/Documentation/arch/arm/

vlocks.rst
     36  bool vlock_trylock(int this_cpu)
     39  currently_voting[this_cpu] = 1;
     42  currently_voting[this_cpu] = 0;
     47  last_vote = this_cpu;
     48  currently_voting[this_cpu] = 0;
     57  if (last_vote == this_cpu)
    100  my_town = towns[(this_cpu >> 4) & 0xf];
    101  I_won = vlock_trylock(my_town, this_cpu & 0xf);
    104  my_state = states[(this_cpu >> 8) & 0xf];
    105  I_won = vlock_lock(my_state, this_cpu & 0xf));
    [all …]

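The surrounding document develops this voting algorithm in full; the hits show only scattered lines. A plain-C sketch reconstructing the trylock logic (the memory barriers and cache handling of the real MCPM implementation are deliberately omitted, and the voter count is a placeholder)::

    #include <stdbool.h>

    #define NR_VOTERS  4             /* placeholder cluster size */
    #define VOTE_NONE  (-1)

    static volatile int currently_voting[NR_VOTERS];
    static volatile int last_vote = VOTE_NONE;

    static bool vlock_trylock(int this_cpu)
    {
            int i;

            currently_voting[this_cpu] = 1;          /* announce candidacy */
            if (last_vote != VOTE_NONE) {
                    currently_voting[this_cpu] = 0;  /* lost before voting */
                    return false;
            }

            last_vote = this_cpu;                    /* vote for ourself */
            currently_voting[this_cpu] = 0;

            for (i = 0; i < NR_VOTERS; i++)          /* wait out other voters */
                    while (currently_voting[i])
                            ;

            return last_vote == this_cpu;            /* did our vote stick? */
    }
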
/linux/kernel/trace/

trace_clock.c
     97  int this_cpu;  in trace_clock_global() local
    102  this_cpu = raw_smp_processor_id();  in trace_clock_global()
    116  now = sched_clock_cpu(this_cpu);  in trace_clock_global()

/linux/lib/

nmi_backtrace.c
     40  int i, this_cpu = get_cpu();  in nmi_trigger_cpumask_backtrace() local
     61  if (cpumask_test_cpu(this_cpu, to_cpumask(backtrace_mask)))  in nmi_trigger_cpumask_backtrace()
     66  this_cpu, nr_cpumask_bits, to_cpumask(backtrace_mask));  in nmi_trigger_cpumask_backtrace()

smp_processor_id.c
     14  int this_cpu = raw_smp_processor_id();  in check_preemption_disabled() local
     53  return this_cpu;  in check_preemption_disabled()

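lib/smp_processor_id.c implements the debug check behind smp_processor_id(): the returned CPU number is only trustworthy while the caller cannot migrate. A sketch of the rule it enforces, with a hypothetical per-CPU scratch variable::

    #include <linux/percpu.h>
    #include <linux/smp.h>

    static DEFINE_PER_CPU(int, scratch);

    void demo(void)
    {
            int cpu;

            cpu = get_cpu();             /* disables preemption */
            per_cpu(scratch, cpu) = 1;   /* safe: no migration until put_cpu() */
            put_cpu();

            /*
             * raw_smp_processor_id() bypasses the debug check; with
             * preemption enabled the result may be stale as soon as
             * it is read.
             */
            cpu = raw_smp_processor_id();
    }
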
/linux/init/

calibrate.c
    282  int this_cpu = smp_processor_id();  in calibrate_delay() local
    284  if (per_cpu(cpu_loops_per_jiffy, this_cpu)) {  in calibrate_delay()
    285  lpj = per_cpu(cpu_loops_per_jiffy, this_cpu);  in calibrate_delay()
    309  per_cpu(cpu_loops_per_jiffy, this_cpu) = lpj;  in calibrate_delay()

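calibrate_delay() memoizes its result per CPU so a re-onlined CPU can skip recalibration. The caching idiom, sketched with a hypothetical expensive do_calibrate() and assuming the caller is pinned to one CPU::

    #include <linux/percpu.h>
    #include <linux/smp.h>

    static DEFINE_PER_CPU(unsigned long, my_lpj);

    unsigned long do_calibrate(void);    /* hypothetical, expensive */

    static unsigned long get_lpj(void)
    {
            int this_cpu = smp_processor_id();
            unsigned long lpj = per_cpu(my_lpj, this_cpu);

            if (!lpj) {
                    lpj = do_calibrate();
                    per_cpu(my_lpj, this_cpu) = lpj;  /* cache for next time */
            }
            return lpj;
    }
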
/linux/arch/x86/kernel/apic/

x2apic_phys.c
     57  unsigned long this_cpu;  in __x2apic_send_IPI_mask() local
     65  this_cpu = smp_processor_id();  in __x2apic_send_IPI_mask()
     67  if (apic_dest == APIC_DEST_ALLBUT && this_cpu == query_cpu)  in __x2apic_send_IPI_mask()

/linux/kernel/

smp.c
    643  int this_cpu;  in smp_call_function_single() local
    652  this_cpu = get_cpu();  in smp_call_function_single()
    660  WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()  in smp_call_function_single()
    785  int cpu, last_cpu, this_cpu = smp_processor_id();  in smp_call_function_many_cond() local
    799  if (cpu_online(this_cpu) && !oops_in_progress &&  in smp_call_function_many_cond()
    812  if (cpumask_any_and_but(mask, cpu_online_mask, this_cpu) < nr_cpu_ids) {  in smp_call_function_many_cond()
    815  __cpumask_clear_cpu(this_cpu, cfd->cpumask);  in smp_call_function_many_cond()
    863  if ((scf_flags & SCF_RUN_LOCAL) && cpumask_test_cpu(this_cpu, mask) &&  in smp_call_function_many_cond()
    864  (!cond_func || cond_func(this_cpu, info))) {  in smp_call_function_many_cond()

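kernel/smp.c provides the cross-call machinery these hits come from. A minimal sketch of the caller side, with a hypothetical remote function::

    #include <linux/smp.h>

    static void remote_fn(void *info)
    {
            /* Runs on the target CPU, in interrupt-like context. */
            *(int *)info = raw_smp_processor_id();
    }

    static int run_on(int cpu)
    {
            int where = -1;
            int ret;

            /* wait=1: do not return until remote_fn finished on @cpu */
            ret = smp_call_function_single(cpu, remote_fn, &where, 1);
            return ret ? ret : where;
    }
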
/linux/arch/arm/kernel/

smp_tlb.c
    166  int this_cpu;  in broadcast_tlb_mm_a15_erratum() local
    172  this_cpu = get_cpu();  in broadcast_tlb_mm_a15_erratum()
    173  a15_erratum_get_cpumask(this_cpu, mm, &mask);  in broadcast_tlb_mm_a15_erratum()

machine_kexec.c
    105  int cpu, this_cpu = raw_smp_processor_id();  in crash_smp_send_stop() local
    112  if (cpu == this_cpu)  in crash_smp_send_stop()

/linux/arch/s390/mm/

maccess.c
    157  int this_cpu, cpu;  in xlate_dev_mem_ptr() local
    160  this_cpu = get_cpu();  in xlate_dev_mem_ptr()
    175  } else if (cpu == this_cpu) {  in xlate_dev_mem_ptr()

/linux/drivers/cpuidle/

cpuidle-ux500.c
     26  int this_cpu = smp_processor_id();  in ux500_enter_idle() local
     49  if (!prcmu_is_cpu_in_wfi(this_cpu ? 0 : 1))  in ux500_enter_idle()

/linux/arch/x86/kernel/cpu/

common.c
    205  static const struct cpu_dev *this_cpu = &default_cpu;  variable
    720  if (!this_cpu)  in table_lookup_model()
    723  info = this_cpu->legacy_models;  in table_lookup_model()
    864  if (this_cpu->legacy_cache_size)  in cpu_detect_cache_sizes()
    865  l2size = this_cpu->legacy_cache_size(c, l2size);  in cpu_detect_cache_sizes()
    888  if (this_cpu->c_detect_tlb)  in cpu_detect_tlb()
    889  this_cpu->c_detect_tlb(c);  in cpu_detect_tlb()
    911  this_cpu = cpu_devs[i];  in get_cpu_vendor()
    912  c->x86_vendor = this_cpu->c_x86_vendor;  in get_cpu_vendor()
    921  this_cpu = &default_cpu;  in get_cpu_vendor()
    [all …]

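Note that this_cpu in arch/x86/kernel/cpu/common.c is not a per-CPU variable at all: it is a file-scope pointer to the detected vendor's struct cpu_dev, consulted through optional hooks. The dispatch pattern, sketched with hypothetical types::

    /* Hypothetical vendor-ops table mirroring the struct cpu_dev idiom. */
    struct vendor_ops {
            const char *name;
            void (*detect_tlb)(void);        /* optional hook, may be NULL */
    };

    static void intel_detect_tlb(void) { /* ... */ }

    static const struct vendor_ops default_ops = { .name = "unknown" };
    static const struct vendor_ops intel_ops = {
            .name       = "GenuineIntel",
            .detect_tlb = intel_detect_tlb,
    };

    static const struct vendor_ops *this_cpu = &default_ops;

    static void detect_tlb(void)
    {
            if (this_cpu->detect_tlb)        /* tolerate absent hooks */
                    this_cpu->detect_tlb();
    }
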
/linux/arch/powerpc/include/asm/

dbell.h
    140  int this_cpu = get_cpu();  in doorbell_try_core_ipi() local
    143  if (cpumask_test_cpu(cpu, cpu_sibling_mask(this_cpu))) {  in doorbell_try_core_ipi()

/linux/arch/x86/hyperv/

hv_apic.c
    170  int cur_cpu, vcpu, this_cpu = smp_processor_id();  in __send_ipi_mask() local
    185  (exclude_self && weight == 1 && cpumask_test_cpu(this_cpu, mask)))  in __send_ipi_mask()
    214  if (exclude_self && cur_cpu == this_cpu)  in __send_ipi_mask()

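__send_ipi_mask() walks a cpumask and optionally skips the sending CPU. The mask-walking idiom, sketched with a hypothetical per-target action (the caller is assumed to have preemption disabled, as IPI send paths do)::

    #include <linux/cpumask.h>
    #include <linux/smp.h>
    #include <linux/types.h>

    static void visit(int cpu);              /* hypothetical per-target action */

    static void for_targets(const struct cpumask *mask, bool exclude_self)
    {
            int cur_cpu, this_cpu = smp_processor_id();

            for_each_cpu(cur_cpu, mask) {
                    if (exclude_self && cur_cpu == this_cpu)
                            continue;
                    visit(cur_cpu);
            }
    }
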
/linux/arch/arm/include/asm/

mmu_context.h
     49  void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
     52  static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,  in a15_erratum_get_cpumask() argument

/linux/tools/perf/

builtin-sched.c
   1572  static void print_sched_map(struct perf_sched *sched, struct perf_cpu this_cpu, int cpus_nr,  in print_sched_map() argument
   1593  if (cpu.cpu == this_cpu.cpu)  in print_sched_map()
   1596  color_fprintf(stdout, cpu.cpu != this_cpu.cpu ? color : cpu_color, "%c", symbol);  in print_sched_map()
   1607  if (cpu.cpu == this_cpu.cpu)  in print_sched_map()
   1632  struct perf_cpu this_cpu = {  in map_switch_event() local
   1643  BUG_ON(this_cpu.cpu >= MAX_CPUS || this_cpu.cpu < 0);  in map_switch_event()
   1645  if (this_cpu.cpu > sched->max_cpu.cpu)  in map_switch_event()
   1646  sched->max_cpu = this_cpu;  in map_switch_event()
   1650  if (!__test_and_set_bit(this_cpu.cpu, sched->map.comp_cpus_mask)) {  in map_switch_event()
   1651  sched->map.comp_cpus[cpus_nr++] = this_cpu;  in map_switch_event()
   [all …]