/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/refcount.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

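/*
 * struct perf_buffer backs a perf event's mmap()ed ring buffer: a metadata
 * page shared with userspace (user_page), nr_pages of sample data, and an
 * optional AUX area for hardware-written streams (e.g. Intel PT).  Lifetime
 * is managed via refcount + RCU; see kernel/events/ring_buffer.c.
 */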
struct perf_buffer {
	refcount_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				overwrite;	/* can overwrite itself */
	int				paused;		/* can write into ring buffer */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	unsigned int			nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	long				aux_watermark;
	/* poll crap */
	spinlock_t			event_lock;
	struct list_head		event_list;

	refcount_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	struct mutex			aux_mutex;
	long				aux_head;
	unsigned int			aux_nest;
	long				aux_wakeup;	/* last aux_watermark boundary crossed by aux_head */
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	refcount_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	refcount_t			aux_refcount;
	int				aux_in_sampling;
	int				aux_in_pause_resume;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[];
};
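
/*
 * For reference, the mmap() layout served by perf_mmap_to_page(): pgoff 0
 * maps user_page, pgoff 1..nr_pages map data_pages[], and the AUX area (if
 * allocated) begins at aux_pgoff.
 */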

extern void rb_free(struct perf_buffer *rb);

static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
	struct perf_buffer *rb;

	rb = container_of(rcu_head, struct perf_buffer, rcu_head);
	free_uid(rb->mmap_user);
	rb_free(rb);
}

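/*
 * Note that a buffer with no data pages (rb->nr_pages == 0) stays paused
 * even when asked to resume: there is nothing to write into.
 */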
static inline void rb_toggle_paused(struct perf_buffer *rb, bool pause)
{
	if (!pause && rb->nr_pages)
		rb->paused = 0;
	else
		rb->paused = 1;
}

extern struct perf_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct perf_buffer *rb);
extern struct perf_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct perf_buffer *rb);

static inline bool rb_has_aux(struct perf_buffer *rb)
{
	return !!rb->aux_nr_pages;
}

void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

extern struct page *
perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct perf_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct perf_buffer *rb)
{
	return 0;
}
#endif

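/*
 * Worked example (a sketch based on rb_alloc() in ring_buffer.c): under
 * CONFIG_PERF_USE_VMALLOC a 16-page buffer is one contiguous allocation
 * with nr_pages == 1 and page_order == 4, so data_page_nr() below still
 * reports 16 virtual pages and perf_data_size() 16 * PAGE_SIZE.
 */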
static inline int data_page_nr(struct perf_buffer *rb)
{
	return rb->nr_pages << page_order(rb);
}

static inline unsigned long perf_data_size(struct perf_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct perf_buffer *rb)
{
	return (unsigned long)rb->aux_nr_pages << PAGE_SHIFT;
}

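/*
 * Contract for the memcpy_func plugged into the copy body below: return the
 * number of bytes that could *not* be copied, mirroring
 * __copy_from_user_inatomic().  The generated function in turn returns how
 * many of @len bytes remain unwritten (0 on full success).
 */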
#define __DEFINE_OUTPUT_COPY_BODY(advance_buf, memcpy_func, ...)	\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size    = min(handle->size, len);			\
		written = memcpy_func(__VA_ARGS__);			\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		if (advance_buf)					\
			buf += written;					\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct perf_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}

#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
__DEFINE_OUTPUT_COPY_BODY(true, memcpy_func, handle->addr, buf, size)
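
/*
 * For illustration, each DEFINE_OUTPUT_COPY() user below expands to roughly:
 *
 *	static inline unsigned long
 *	__output_copy(struct perf_output_handle *handle,
 *		      const void *buf, unsigned long len)
 *	{
 *		... copy loop advancing handle->addr across data_pages[] ...
 *	}
 */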
169
170 static inline unsigned long
__output_custom(struct perf_output_handle * handle,perf_copy_f copy_func,const void * buf,unsigned long len)171 __output_custom(struct perf_output_handle *handle, perf_copy_f copy_func,
172 const void *buf, unsigned long len)
173 {
174 unsigned long orig_len = len;
175 __DEFINE_OUTPUT_COPY_BODY(false, copy_func, handle->addr, buf,
176 orig_len - len, size)
177 }
178
179 static inline unsigned long
memcpy_common(void * dst,const void * src,unsigned long n)180 memcpy_common(void *dst, const void *src, unsigned long n)
181 {
182 memcpy(dst, src, n);
183 return 0;
184 }
185
DEFINE_OUTPUT_COPY(__output_copy,memcpy_common)186 DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
187
188 static inline unsigned long
189 memcpy_skip(void *dst, const void *src, unsigned long n)
190 {
191 return 0;
192 }
193
DEFINE_OUTPUT_COPY(__output_skip,memcpy_skip)194 DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
195
196 #ifndef arch_perf_out_copy_user
197 #define arch_perf_out_copy_user arch_perf_out_copy_user
198
199 static inline unsigned long
200 arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
201 {
202 unsigned long ret;
203
204 pagefault_disable();
205 ret = __copy_from_user_inatomic(dst, src, n);
206 pagefault_enable();
207
208 return ret;
209 }
210 #endif
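
/*
 * An architecture can supply a faster arch_perf_out_copy_user before this
 * header is included; x86, for instance, is understood to plug in
 * copy_from_user_nmi() here.
 */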

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)

static inline int get_recursion_context(u8 *recursion)
{
	unsigned char rctx = interrupt_context_level();

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(u8 *recursion, unsigned char rctx)
{
	barrier();
	recursion[rctx]--;
}
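
/*
 * Typical pairing, as a sketch (real callers live in kernel/events/core.c):
 *
 *	rctx = get_recursion_context(recursion);
 *	if (rctx < 0)
 *		return;		(already running in this context)
 *	... emit the event ...
 *	put_recursion_context(recursion, rctx);
 */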

#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */