#ifndef _PERF_PERF_H
#define _PERF_PERF_H

struct winsize;

void get_term_dimensions(struct winsize *ws);

#if defined(__i386__)
#include "../../arch/x86/include/asm/unistd.h"
#define rmb()		asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define cpu_relax()	asm volatile("rep; nop" ::: "memory")
#define CPUINFO_PROC	"model name"
#endif

#if defined(__x86_64__)
#include "../../arch/x86/include/asm/unistd.h"
#define rmb()		asm volatile("lfence" ::: "memory")
#define cpu_relax()	asm volatile("rep; nop" ::: "memory")
#define CPUINFO_PROC	"model name"
#endif

#ifdef __powerpc__
#include "../../arch/powerpc/include/asm/unistd.h"
#define rmb()		asm volatile ("sync" ::: "memory")
#define cpu_relax()	asm volatile ("" ::: "memory")
#define CPUINFO_PROC	"cpu"
#endif

#ifdef __s390__
#include "../../arch/s390/include/asm/unistd.h"
#define rmb()		asm volatile("bcr 15,0" ::: "memory")
#define cpu_relax()	asm volatile("" ::: "memory")
#endif

#ifdef __sh__
#include "../../arch/sh/include/asm/unistd.h"
#if defined(__SH4A__) || defined(__SH5__)
# define rmb()		asm volatile("synco" ::: "memory")
#else
# define rmb()		asm volatile("" ::: "memory")
#endif
#define cpu_relax()	asm volatile("" ::: "memory")
#define CPUINFO_PROC	"cpu type"
#endif

#ifdef __hppa__
#include "../../arch/parisc/include/asm/unistd.h"
#define rmb()		asm volatile("" ::: "memory")
#define cpu_relax()	asm volatile("" ::: "memory")
#define CPUINFO_PROC	"cpu"
#endif

#ifdef __sparc__
#include "../../arch/sparc/include/asm/unistd.h"
#define rmb()		asm volatile("" ::: "memory")
#define cpu_relax()	asm volatile("" ::: "memory")
#define CPUINFO_PROC	"cpu"
#endif

#ifdef __alpha__
#include "../../arch/alpha/include/asm/unistd.h"
#define rmb()		asm volatile("mb" ::: "memory")
#define cpu_relax()	asm volatile("" ::: "memory")
#define CPUINFO_PROC	"cpu model"
#endif

#ifdef __ia64__
#include "../../arch/ia64/include/asm/unistd.h"
#define rmb()		asm volatile ("mf" ::: "memory")
#define cpu_relax()	asm volatile ("hint @pause" ::: "memory")
#define CPUINFO_PROC	"model name"
#endif

#ifdef __arm__
#include "../../arch/arm/include/asm/unistd.h"
/*
 * Use the __kuser_memory_barrier helper in the CPU helper page. See
 * arch/arm/kernel/entry-armv.S in the kernel source for details.
 */
#define rmb()		((void(*)(void))0xffff0fa0)()
#define cpu_relax()	asm volatile("" ::: "memory")
#define CPUINFO_PROC	"Processor"
#endif

#ifdef __mips__
#include "../../arch/mips/include/asm/unistd.h"
#define rmb()		asm volatile(					\
				".set	mips2\n\t"			\
				"sync\n\t"				\
				".set	mips0"				\
				: /* no output */			\
				: /* no input */			\
				: "memory")
#define cpu_relax()	asm volatile("" ::: "memory")
#define CPUINFO_PROC	"cpu model"
#endif
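
/*
 * Example (illustrative sketch, not taken from this file): rmb() and
 * cpu_relax() are meant for busy-polling data that another context
 * updates.  The shared structure and consume() helper below are
 * hypothetical:
 *
 *	while (!shared->ready)
 *		cpu_relax();	// ease off the pipeline while spinning
 *	rmb();			// order the flag read before the data reads
 *	consume(shared->data);
 */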

#include <time.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include "../../include/linux/perf_event.h"
#include "util/types.h"
#include <stdbool.h>

struct perf_mmap {
	void			*base;	/* start of the mmap'ed area; first page is struct perf_event_mmap_page */
	int			mask;	/* data area size - 1, used to wrap ring-buffer offsets */
	unsigned int		prev;	/* last ring-buffer position we consumed up to */
};

static inline unsigned int perf_mmap__read_head(struct perf_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->base;
	unsigned int head = pc->data_head;
	rmb();
	return head;
}

static inline void perf_mmap__write_tail(struct perf_mmap *md,
					 unsigned long tail)
{
	struct perf_event_mmap_page *pc = md->base;

	/*
	 * ensure all reads are done before we write the tail out.
	 */
	/* mb(); */
	pc->data_tail = tail;
}
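
/*
 * Usage sketch (illustrative only): a consumer typically snapshots the
 * head, walks records up to it, then publishes the new tail so the
 * kernel may reuse that space.  page_size and handle_event() are
 * hypothetical here, and records that wrap the buffer end would need
 * to be copied out first (omitted for brevity):
 *
 *	unsigned int head = perf_mmap__read_head(&map);
 *	unsigned int old  = map.prev;
 *
 *	while (old != head) {
 *		struct perf_event_header *hdr;
 *
 *		hdr = map.base + page_size + (old & map.mask);
 *		handle_event(hdr);
 *		old += hdr->size;
 *	}
 *	map.prev = old;
 *	perf_mmap__write_tail(&map, old);
 */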

/*
 * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all
 * counters in the current task.
 */
#define PR_TASK_PERF_EVENTS_DISABLE   31
#define PR_TASK_PERF_EVENTS_ENABLE    32
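
/*
 * Example (illustrative sketch): bracketing a region so its cost does
 * not show up in the task's counters; do_uncounted_work() is
 * hypothetical:
 *
 *	prctl(PR_TASK_PERF_EVENTS_DISABLE);
 *	do_uncounted_work();
 *	prctl(PR_TASK_PERF_EVENTS_ENABLE);
 */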

#ifndef NSEC_PER_SEC
# define NSEC_PER_SEC			1000000000ULL
#endif

static inline unsigned long long rdclock(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}
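
/*
 * Usage sketch: timing a span in nanoseconds (run_workload() is
 * hypothetical):
 *
 *	u64 t0 = rdclock();
 *	run_workload();
 *	u64 delta_ns = rdclock() - t0;
 */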

/*
 * Pick up some kernel type conventions:
 */
#define __user
#define asmlinkage

#define unlikely(x)	__builtin_expect(!!(x), 0)
/* min() with a compile-time type-compatibility check between x and y */
#define min(x, y) ({				\
	typeof(x) _min1 = (x);			\
	typeof(y) _min2 = (y);			\
	(void) (&_min1 == &_min2);		\
	_min1 < _min2 ? _min1 : _min2; })

static inline int
sys_perf_event_open(struct perf_event_attr *attr,
		      pid_t pid, int cpu, int group_fd,
		      unsigned long flags)
{
	attr->size = sizeof(*attr);
	return syscall(__NR_perf_event_open, attr, pid, cpu,
		       group_fd, flags);
}
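
/*
 * Example (illustrative sketch, error handling omitted): counting CPU
 * cycles for the calling thread and reading the result; run_workload()
 * is hypothetical:
 *
 *	struct perf_event_attr attr = {
 *		.type	  = PERF_TYPE_HARDWARE,
 *		.config	  = PERF_COUNT_HW_CPU_CYCLES,
 *		.disabled = 1,
 *	};
 *	int fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
 *	u64 count;
 *
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	run_workload();
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *	read(fd, &count, sizeof(count));
 *	close(fd);
 */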

#define MAX_COUNTERS			256
#define MAX_NR_CPUS			256

struct ip_callchain {
	u64 nr;		/* number of entries in ips[] */
	u64 ips[0];	/* instruction pointers, innermost frame first */
};
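
/*
 * Sketch: walking the variable-length callchain attached to a sample
 * (the chain pointer is assumed to reference valid sample data):
 *
 *	struct ip_callchain *chain = ...;
 *	u64 i;
 *
 *	for (i = 0; i < chain->nr; i++)
 *		printf("  %016llx\n", (unsigned long long)chain->ips[i]);
 */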

extern bool perf_host, perf_guest;
extern const char perf_version_string[];

void pthread__unblock_sigwinch(void);

#endif /* _PERF_PERF_H */