/* kernel/events/internal.h (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732) */
#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
	struct irq_work			irq_work;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				overwrite;	/* can overwrite itself */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	long				aux_watermark;
	/* poll support */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	local_t				aux_head;
	local_t				aux_nest;
	local_t				aux_wakeup;
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	atomic_t			aux_refcount;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[];	/* flexible array of data pages */
};
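
/*
 * The flexible data_pages[] array makes struct ring_buffer a
 * variable-size allocation: the data page pointers live directly
 * behind the header.  A minimal sketch of the sizing arithmetic this
 * implies (illustrative only; the real allocation is done by
 * rb_alloc() in kernel/events/ring_buffer.c):
 *
 *	size_t size = offsetof(struct ring_buffer, data_pages) +
 *		      (size_t)nr_pages * sizeof(void *);
 *	struct ring_buffer *rb = kzalloc(size, GFP_KERNEL);
 */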

extern void rb_free(struct ring_buffer *rb);

static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
	struct ring_buffer *rb;

	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
	rb_free(rb);
}
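
/*
 * rb_free_rcu() is shaped as an RCU callback so the buffer can be
 * freed only after a grace period.  A hedged sketch of the intended
 * use (the actual call site lives in kernel/events/core.c):
 *
 *	call_rcu(&rb->rcu_head, rb_free_rcu);
 */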

extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct ring_buffer *rb);
extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct ring_buffer *rb);
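
/*
 * ring_buffer_get()/ring_buffer_put() bracket any use of an event's
 * buffer outside the mmap path: get pins the buffer against teardown,
 * put drops the reference.  A hedged usage sketch:
 *
 *	struct ring_buffer *rb = ring_buffer_get(event);
 *	if (rb) {
 *		... read rb->user_page, rb->data_pages[] ...
 *		ring_buffer_put(rb);
 *	}
 */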

static inline bool rb_has_aux(struct ring_buffer *rb)
{
	return !!rb->aux_nr_pages;
}

void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);
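
/*
 * perf_mmap_to_page() resolves a page offset within the perf mmap
 * region (user page, data pages or AUX pages) to its backing
 * struct page; the perf mmap fault handler is built on it.
 */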

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif

static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct ring_buffer *rb)
{
	return rb->aux_nr_pages << PAGE_SHIFT;
}
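
/*
 * Worked example: with 4 KiB pages (PAGE_SHIFT == 12), nr_pages == 8
 * and page_order() == 0, perf_data_size() yields 8 << 12 == 32 KiB.
 * Under CONFIG_PERF_USE_VMALLOC the data area is a single virtually
 * contiguous vmalloc allocation; rb_alloc() then collapses nr_pages
 * to 1 and records the real page count in page_order, so the same
 * expression still comes out to 1 << (12 + 3) == 32 KiB.
 */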

#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size    = min(handle->size, len);			\
		written = memcpy_func(handle->addr, buf, size);		\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		buf += written;						\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct ring_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}

static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
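
/*
 * DEFINE_OUTPUT_COPY(__output_copy, memcpy_common) above generates,
 * in effect:
 *
 *	static inline unsigned long
 *	__output_copy(struct perf_output_handle *handle,
 *		      const void *buf, unsigned long len);
 *
 * which copies len bytes into the ring buffer at the handle's current
 * position, wrapping from one data page to the next (nr_pages is a
 * power of two in perf, so the & mask works), and returns the number
 * of bytes it could NOT write: 0 on complete success.
 */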

static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
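
/*
 * __output_skip() runs the same loop with a no-op "copy", so it
 * advances the handle by len bytes without touching the payload;
 * useful when a record's space must be reserved but its contents
 * are filled in by other means.
 */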

#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
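
/*
 * __output_copy_user() follows the same contract as __output_copy()
 * but reads from a user-space pointer with page faults disabled, so a
 * faulting source makes it stop early and report the unwritten
 * remainder instead of sleeping in the fault handler.  The
 * self-referential #define above marks the generic version as
 * present: an architecture that supplies its own copy routine defines
 * the macro before this point and the #ifndef skips the default.
 */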

/* Callchain handling */
extern struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(void);
extern void put_callchain_buffers(void);

/*
 * Per-context recursion guard: one counter each for task (0),
 * softirq (1), hardirq (2) and NMI (3) context.  Returns the index
 * of the current context, or -1 if perf is already active in it.
 */
static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}
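
/*
 * The guard is used as a matched pair around a non-reentrant section.
 * A hedged sketch modeled on kernel/events/callchain.c; the per-CPU
 * array name here is hypothetical:
 *
 *	static DEFINE_PER_CPU(int, my_recursion[4]);
 *
 *	int rctx = get_recursion_context(this_cpu_ptr(my_recursion));
 *	if (rctx < 0)
 *		return;		(already active in this context)
 *	... do the non-reentrant work ...
 *	put_recursion_context(this_cpu_ptr(my_recursion), rctx);
 */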

#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */