/linux/include/asm-generic/
  local.h
     29  #define local_read(l)      atomic_long_read(&(l)->a)   [macro]
     52  #define __local_inc(l)     local_set((l), local_read(l) + 1)
     53  #define __local_dec(l)     local_set((l), local_read(l) - 1)
     54  #define __local_add(i,l)   local_set((l), local_read(l) + (i))
     55  #define __local_sub(i,l)   local_set((l), local_read(l) - (i))
|
  local64.h
     30  #define local64_read(l)    local_read(&(l)->a)
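
The asm-generic definitions above reduce local_read() to an atomic_long_read() of the counter embedded in local_t, with local64_read() layered on top. A minimal sketch of the increment side of the documented usage pattern (the per-CPU counter fast_path_hits and its helper are hypothetical; the get_cpu_var()/put_cpu_var() pairing follows Documentation/core-api/local_ops.rst):

    #include <linux/percpu.h>
    #include <asm/local.h>

    /* Hypothetical per-CPU statistics counter; only its owning CPU writes it. */
    static DEFINE_PER_CPU(local_t, fast_path_hits) = LOCAL_INIT(0);

    static void note_fast_path_hit(void)
    {
            /* get_cpu_var() disables preemption, so local_inc() stays on this CPU */
            local_inc(&get_cpu_var(fast_path_hits));
            put_cpu_var(fast_path_hits);
    }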
|
/linux/Documentation/translations/zh_CN/core-api/
  local_ops.rst
    117  Those local counters can be read from other CPUs in order to obtain the total count. Note that local_read
    123      sum += local_read(&per_cpu(counters, cpu));
    125  If you want to use a remote local_read to synchronize access to a resource between CPUs, then on both the writer and the reader
    172          local_read(&per_cpu(counters, cpu)));
|
/linux/arch/x86/include/asm/
  local.h
     16  #define local_read(l)      atomic_long_read(&(l)->a)   [macro]
    142  long c = local_read(l);  (in local_xchg())
    161  long c = local_read(l);  (in local_add_unless())
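
The x86 hits at lines 142 and 161 are the initial local_read() snapshots inside local_xchg() and local_add_unless(). A sketch of the kind of read-then-cmpxchg retry loop involved, assuming the local_try_cmpxchg() helper from the same header (the _sketch suffix marks this as an illustration, not the in-tree code):

    #include <linux/types.h>
    #include <linux/compiler.h>
    #include <asm/local.h>

    /*
     * Illustration only: add @a to @l unless its value is @u, built from an
     * initial local_read() snapshot plus a local_try_cmpxchg() retry loop.
     */
    static inline bool local_add_unless_sketch(local_t *l, long a, long u)
    {
            long c = local_read(l);

            do {
                    if (unlikely(c == u))
                            return false;
                    /* on failure, local_try_cmpxchg() refreshes c with the current value */
            } while (!local_try_cmpxchg(l, &c, c + a));

            return true;
    }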
|
/linux/include/linux/
  part_stat.h
     78  local_read(&(part_stat_get(part, field)))
     80  local_read(&(part_stat_get_cpu(part, field, cpu)))
|
/linux/kernel/events/
  ring_buffer.c
     51  handle->wakeup = local_read(&rb->wakeup);  (in perf_output_get_handle())
     80  head = local_read(&rb->head);  (in perf_output_put_handle())
    129  if (unlikely(head != local_read(&rb->head))) {  (in perf_output_put_handle())
    134  if (handle->wakeup != local_read(&rb->wakeup))  (in perf_output_put_handle())
    190  have_lost = local_read(&rb->lost);  (in __perf_output_begin())
    199  offset = local_read(&rb->head);  (in __perf_output_begin())
    238  if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))  (in __perf_output_begin())
|
  core.c
   3697  if (local_read(&ctx->nr_no_switch_fast) ||  (in perf_event_context_sched_out())
   3698      local_read(&next_ctx->nr_no_switch_fast)) {  (in perf_event_context_sched_out())
|
/linux/kernel/trace/
  ring_buffer.c
    393  return local_read(&bpage->page->commit);  (in rb_page_commit())
    676  commit = local_read(&page->page->commit);  (in verify_event())
    677  write = local_read(&page->write);  (in verify_event())
    745  nest = local_read(&cpu_buffer->committing);  (in ring_buffer_event_time_stamp())
    776  read = local_read(&buffer->buffers[cpu]->pages_read);  (in ring_buffer_nr_dirty_pages())
    777  lost = local_read(&buffer->buffers[cpu]->pages_lost);  (in ring_buffer_nr_dirty_pages())
    778  cnt = local_read(&buffer->buffers[cpu]->pages_touched);  (in ring_buffer_nr_dirty_pages())
   1826  if ((unsigned)local_read(&subbuf->commit) > subbuf_size) {  (in rb_cpu_meta_valid())
   1903  tail = local_read(&dpage->commit);  (in rb_validate_buffer())
   1928  entry_bytes += local_read(&cpu_buffer->reader_page->page->commit);  (in rb_meta_validate_events())
    [all …]
|
  trace_osnoise.c
    772  int_counter = local_read(&osn_var->int_counter);  (in get_int_safe_duration())
    781  } while (int_counter != local_read(&osn_var->int_counter));  (in get_int_safe_duration())
    810  int_counter = local_read(&osn_var->int_counter);  (in set_int_safe_time())
    818  } while (int_counter != local_read(&osn_var->int_counter));  (in set_int_safe_time())
    833  int_counter = local_read(&osn_var->int_counter);  (in copy_int_safe_time())
    841  } while (int_counter != local_read(&osn_var->int_counter));  (in copy_int_safe_time())
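
The trace_osnoise.c hits all belong to the same retry idiom: sample the per-CPU interrupt counter with local_read() before and after a measurement and retry if it changed, meaning an interrupt ran on this CPU in between. A condensed sketch, assuming a caller-supplied clock callback in place of the tracer's internal clock read (names here are illustrative, not the in-tree signatures):

    #include <linux/types.h>
    #include <linux/compiler.h>
    #include <asm/local.h>

    /*
     * Illustration of the "interrupt safe" read used by get_int_safe_duration()
     * and set_int_safe_time(): retry the clock read until no interrupt has
     * bumped the per-CPU counter during the sampling window.
     */
    static u64 int_safe_time_sketch(local_t *int_counter, u64 (*read_clock)(void))
    {
            u64 count, time;

            do {
                    count = local_read(int_counter);
                    barrier();      /* keep the clock read inside the sampled window */
                    time = read_clock();
                    barrier();
            } while (count != local_read(int_counter));

            return time;
    }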
|
  trace_irqsoff.c
    409  if (unlikely(!data) || local_read(&data->disabled))  (in start_critical_timing())
    449  !data->critical_start || local_read(&data->disabled))  (in stop_critical_timing())
|
  ring_buffer_benchmark.c
    126  commit = local_read(&rpage->commit) & 0xfffff;  (in read_page())
|
/linux/arch/alpha/include/asm/
  local.h
     14  #define local_read(l)      atomic_long_read(&(l)->a)   [macro]
     79  long c = local_read(l);  (in local_add_unless())
|
/linux/arch/loongarch/include/asm/
  local.h
     20  #define local_read(l)      atomic_long_read(&(l)->a)   [macro]
    121  long c = local_read(l);  (in local_add_unless())
|
/linux/arch/mips/include/asm/
  local.h
     19  #define local_read(l)      atomic_long_read(&(l)->a)   [macro]
    122  long c = local_read(l);  (in local_add_unless())
|
/linux/Documentation/core-api/
  local_ops.rst
    121  the data seen by local_read across CPUs must be considered to be out of order
    126      sum += local_read(&per_cpu(counters, cpu));
    128  If you want to use a remote local_read to synchronize access to a resource
    178          local_read(&per_cpu(counters, cpu)));
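
The excerpt above is the summing side of the local_ops pattern: any CPU may local_read() every other CPU's counter, but those remote reads are not ordered against the owners' updates. A sketch of that loop, reusing the hypothetical fast_path_hits counter from the earlier sketch:

    #include <linux/cpumask.h>
    #include <linux/percpu.h>
    #include <asm/local.h>

    /*
     * Snapshot the total across CPUs.  Remote local_read()s are not ordered
     * against the owners' local_inc() calls, so explicit memory barriers are
     * needed if this value has to synchronize anything (see the excerpt above).
     */
    static long sum_fast_path_hits(void)
    {
            long sum = 0;
            int cpu;

            for_each_online_cpu(cpu)
                    sum += local_read(&per_cpu(fast_path_hits, cpu));

            return sum;
    }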
|
/linux/arch/x86/events/intel/
  bts.c
    153  index = local_read(&bb->head);  (in bts_config_buffer())
    501  old_head = local_read(&bb->head);  (in intel_bts_interrupt())
    505  if (old_head == local_read(&bb->head))  (in intel_bts_interrupt())
|
/linux/arch/powerpc/include/asm/
  local.h
     20  static __inline__ long local_read(const local_t *l)  [function]
|
/linux/arch/mips/math-emu/
  me-debugfs.c
     25  sum += local_read(pv);  (in fpuemu_stat_get())
|
/linux/include/linux/qed/
  qed_rdma_if.h
    235  bool local_read;  [struct member]
|
/linux/drivers/net/ethernet/qlogic/qed/
  qed_rdma.c
   1517  params->local_read);  (in qed_rdma_register_tid())
|
/linux/kernel/sched/
  ext.c
    816  if (local_read(&rq->scx.reenq_local_deferred)) {  (in run_deferred())
|