/* kernel/events/internal.h (revision b43ab901d671e3e3cad425ea5e9a3c74e266dcdd) */
#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				writable;	/* are we writable   */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	/* poll() support */
	spinlock_t			event_lock;
	struct list_head		event_list;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};
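
/*
 * data_pages[] is a flexible array: rb_alloc() makes one allocation
 * sized for nr_pages pointers.  A minimal sketch of that sizing, on the
 * assumption that the allocator works roughly like this (the real one
 * lives in kernel/events/ring_buffer.c):
 */
#if 0
	unsigned long size = sizeof(struct ring_buffer);

	size += nr_pages * sizeof(void *);	/* one slot per data page */
	rb = kzalloc(size, GFP_KERNEL);
#endif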

extern void rb_free(struct ring_buffer *rb);
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
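
/*
 * Hedged usage sketch for the pair above: nr_pages should be 0 or a
 * power of two (the wrap mask in __output_copy() below depends on it),
 * watermark is the wakeup threshold in bytes, and cpu selects where the
 * pages are allocated (-1 for the current node).  Values illustrative.
 */
#if 0
	struct ring_buffer *rb;

	rb = rb_alloc(8, 4096, -1, RING_BUFFER_WRITABLE);
	if (!rb)
		return -ENOMEM;
	/* ... attach to an event, use, then ... */
	rb_free(rb);	/* refcount/RCU handling elided */
#endif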

extern void
perf_event_header__init_id(struct perf_event_header *header,
			   struct perf_sample_data *data,
			   struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
			     struct perf_output_handle *handle,
			     struct perf_sample_data *sample);
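
/*
 * Typical pairing for the two helpers above, sketched after the
 * side-band record pattern: __init_id() grows header.size for the
 * event's sample_type id fields and fills @data, the caller emits its
 * record, then __output_id_sample() appends those id fields.
 * perf_output_begin()/put()/end() are assumed from the public perf
 * API; the record type is illustrative.
 */
#if 0
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	struct perf_event_header header = {
		.type = PERF_RECORD_THROTTLE,
		.size = sizeof(header),
	};

	perf_event_header__init_id(&header, &sample, event);

	if (perf_output_begin(&handle, event, header.size))
		return;

	perf_output_put(&handle, header);
	perf_event__output_id_sample(event, &handle, &sample);
	perf_output_end(&handle);
#endif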

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);
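
/*
 * Layout assumed here: pgoff 0 maps to user_page (the control page),
 * pgoff N maps to data_pages[N - 1].  With CONFIG_PERF_USE_VMALLOC the
 * whole range is carved out of one contiguous allocation instead.
 */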

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif
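
/*
 * In the vmalloc case the buffer is one contiguous allocation, so
 * rb_alloc() is expected to collapse it to a single "page" of order
 * ilog2(requested pages); the size and wrap math below then works
 * unchanged for both layouts.
 */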

static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}
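
/*
 * Worked example: 8 data pages of order 0 with PAGE_SHIFT == 12 gives
 * 8 << 12 == 32768 bytes; the equivalent vmalloc layout (nr_pages == 1,
 * page_order() == 3) gives 1 << (12 + 3), the same 32KiB.
 */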

static inline void
__output_copy(struct perf_output_handle *handle,
		   const void *buf, unsigned int len)
{
	do {
		unsigned long size = min_t(unsigned long, handle->size, len);

		memcpy(handle->addr, buf, size);

		len -= size;
		handle->addr += size;
		buf += size;
		handle->size -= size;
		if (!handle->size) {
			struct ring_buffer *rb = handle->rb;

			handle->page++;
			handle->page &= rb->nr_pages - 1;
			handle->addr = rb->data_pages[handle->page];
			handle->size = PAGE_SIZE << page_order(rb);
		}
	} while (len);
}
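
/*
 * A hedged caller sketch: handle->addr/size/page are assumed to have
 * been primed by perf_output_begin(); __output_copy() then copies across
 * page boundaries.  The wrap (handle->page &= rb->nr_pages - 1) is why
 * nr_pages must be a power of two.
 */
#if 0
	struct {
		u32 pid, tid;	/* illustrative payload */
	} rec = { .pid = 1234, .tid = 1234 };

	__output_copy(&handle, &rec, sizeof(rec));
#endif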

/* Callchain handling */
extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
extern int get_callchain_buffers(void);
extern void put_callchain_buffers(void);
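
/*
 * get/put_callchain_buffers() refcount the shared callchain scratch
 * buffers.  Expected pairing, sketched for an event that samples
 * callchains (the error path simply propagates what get returns):
 */
#if 0
	int err = get_callchain_buffers();	/* on event creation */

	if (err)
		return err;
	/* ... event lifetime ... */
	put_callchain_buffers();		/* on event teardown */
#endif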

static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}
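
/*
 * The two helpers above bracket code that must not recurse within the
 * same context level (task = 0, softirq = 1, hardirq = 2, NMI = 3).
 * A hedged sketch with a hypothetical recursion array (per-cpu in
 * real use):
 */
#if 0
	static int my_recursion[4];	/* hypothetical */
	int rctx;

	rctx = get_recursion_context(my_recursion);
	if (rctx < 0)
		return;		/* already active at this level */

	/* ... non-reentrant work ... */

	put_recursion_context(my_recursion, rctx);
#endif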

#endif /* _KERNEL_EVENTS_INTERNAL_H */