// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from evlist.c and builtin-{top,stat,record}.c; see those files
 * for further copyright notes.
 */

#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h> // sysconf()
#include <perf/mmap.h>
#ifdef HAVE_LIBNUMA_SUPPORT
#include <numaif.h>
#endif
#include "cpumap.h"
#include "debug.h"
#include "event.h"
#include "mmap.h"
#include "../perf.h"
#include <internal/lib.h> /* page_size */

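/*
 * Size of the mmap'ed ring buffer: the data pages plus one control (header)
 * page. The same length is used to size the tool-side buffers below.
 */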
size_t mmap__mmap_len(struct mmap *map)
{
	return perf_mmap__mmap_len(&map->core);
}

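/*
 * Weak default stubs for AUX area tracing: they are overridden by the real
 * implementations in util/auxtrace.c when auxtrace support is compiled in.
 */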
int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused,
				       off_t auxtrace_offset __maybe_unused,
				       unsigned int auxtrace_pages __maybe_unused,
				       bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
					  struct evlist *evlist __maybe_unused,
					  int idx __maybe_unused,
					  bool per_cpu __maybe_unused)
{
}

#ifdef HAVE_AIO_SUPPORT
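/*
 * AIO is in use when the user requested at least one control block for
 * asynchronous trace writing (e.g. via perf record's --aio option).
 */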
static int perf_mmap__aio_enabled(struct mmap *map)
{
	return map->aio.nr_cblocks > 0;
}

#ifdef HAVE_LIBNUMA_SUPPORT
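/*
 * With libnuma available the AIO buffers are allocated with mmap() so that
 * they can be bound to the NUMA node of the monitored CPU with mbind().
 */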
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
	map->aio.data[idx] = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
				  MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
	if (map->aio.data[idx] == MAP_FAILED) {
		map->aio.data[idx] = NULL;
		return -1;
	}

	return 0;
}

static void perf_mmap__aio_free(struct mmap *map, int idx)
{
	if (map->aio.data[idx]) {
		munmap(map->aio.data[idx], mmap__mmap_len(map));
		map->aio.data[idx] = NULL;
	}
}

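/*
 * Bind the AIO buffer to the NUMA node of the CPU the ring buffer belongs to,
 * unless system-wide affinity was requested or the machine has a single node.
 */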
static int perf_mmap__aio_bind(struct mmap *map, int idx, int cpu, int affinity)
{
	void *data;
	size_t mmap_len;
	unsigned long node_mask;

	if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {
		data = map->aio.data[idx];
		mmap_len = mmap__mmap_len(map);
		node_mask = 1UL << cpu__get_node(cpu);
		/*
		 * The maxnode argument is the number of bits in the mask;
		 * passing 1 here would truncate the mask down to node 0 only.
		 */
		if (mbind(data, mmap_len, MPOL_BIND, &node_mask,
			  sizeof(node_mask) * 8, 0)) {
			pr_err("Failed to bind [%p-%p] AIO buffer to node %d: error %m\n",
				data, data + mmap_len, cpu__get_node(cpu));
			return -1;
		}
	}

	return 0;
}
#else /* !HAVE_LIBNUMA_SUPPORT */
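/* Without libnuma, fall back to plain heap allocations; no node binding is possible. */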
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
	map->aio.data[idx] = malloc(mmap__mmap_len(map));
	if (map->aio.data[idx] == NULL)
		return -1;

	return 0;
}

static void perf_mmap__aio_free(struct mmap *map, int idx)
{
	zfree(&(map->aio.data[idx]));
}

static int perf_mmap__aio_bind(struct mmap *map __maybe_unused, int idx __maybe_unused,
		int cpu __maybe_unused, int affinity __maybe_unused)
{
	return 0;
}
#endif /* HAVE_LIBNUMA_SUPPORT */

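/*
 * Allocate the per-mmap AIO state: one aiocb pointer, one control block and
 * one data buffer per requested control block. On failure the caller is
 * expected to unwind via perf_mmap__aio_munmap().
 */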
static int perf_mmap__aio_mmap(struct mmap *map, struct mmap_params *mp)
{
	int delta_max, i, prio, ret;

	map->aio.nr_cblocks = mp->nr_cblocks;
	if (map->aio.nr_cblocks) {
		map->aio.aiocb = calloc(map->aio.nr_cblocks, sizeof(struct aiocb *));
		if (!map->aio.aiocb) {
			pr_debug2("failed to allocate aiocb for data buffer, error %m\n");
			return -1;
		}
		map->aio.cblocks = calloc(map->aio.nr_cblocks, sizeof(struct aiocb));
		if (!map->aio.cblocks) {
			pr_debug2("failed to allocate cblocks for data buffer, error %m\n");
			return -1;
		}
		map->aio.data = calloc(map->aio.nr_cblocks, sizeof(void *));
		if (!map->aio.data) {
			pr_debug2("failed to allocate data buffer, error %m\n");
			return -1;
		}
		delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);
		for (i = 0; i < map->aio.nr_cblocks; ++i) {
			ret = perf_mmap__aio_alloc(map, i);
			if (ret == -1) {
				pr_debug2("failed to allocate data buffer area, error %m\n");
				return -1;
			}
			ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity);
			if (ret == -1)
				return -1;
			/*
			 * Use a cblock.aio_fildes value different from -1
			 * to denote a started aio write operation on the
			 * cblock, so an explicit record__aio_sync() call
			 * is required before the cblock can be reused.
			 */
			map->aio.cblocks[i].aio_fildes = -1;
			/*
			 * Allocate cblocks with decreasing priority deltas
			 * to get faster aio write system calls: queued
			 * requests are kept in separate per-priority queues,
			 * so adding a new request iterates through a shorter
			 * per-priority list. Blocks with an index higher than
			 * _SC_AIO_PRIO_DELTA_MAX go with priority 0.
			 */
			prio = delta_max - i;
			map->aio.cblocks[i].aio_reqprio = prio >= 0 ? prio : 0;
		}
	}

	return 0;
}
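/*
 * Example (hypothetical numbers) for the priority assignment above: with
 * _SC_AIO_PRIO_DELTA_MAX == 20 and nr_cblocks == 4 the control blocks get
 * aio_reqprio 20, 19, 18 and 17; with nr_cblocks == 25, blocks 21..24 would
 * all share priority 0.
 */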

static void perf_mmap__aio_munmap(struct mmap *map)
{
	int i;

	for (i = 0; i < map->aio.nr_cblocks; ++i)
		perf_mmap__aio_free(map, i);
	zfree(&map->aio.data);
	zfree(&map->aio.cblocks);
	zfree(&map->aio.aiocb);
}
#else /* !HAVE_AIO_SUPPORT */
static int perf_mmap__aio_enabled(struct mmap *map __maybe_unused)
{
	return 0;
}

static int perf_mmap__aio_mmap(struct mmap *map __maybe_unused,
			       struct mmap_params *mp __maybe_unused)
{
	return 0;
}

static void perf_mmap__aio_munmap(struct mmap *map __maybe_unused)
{
}
#endif /* HAVE_AIO_SUPPORT */

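/*
 * Release the tool-side resources layered on top of the core ring buffer:
 * the AIO buffers, the compression data buffer and the AUX area mapping.
 */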
void mmap__munmap(struct mmap *map)
{
	perf_mmap__aio_munmap(map);
	if (map->data != NULL) {
		munmap(map->data, mmap__mmap_len(map));
		map->data = NULL;
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}

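/* Collect all online CPUs belonging to the given NUMA node into *mask. */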
static void build_node_mask(int node, cpu_set_t *mask)
{
	int c, cpu, nr_cpus;
	const struct perf_cpu_map *cpu_map = NULL;

	cpu_map = cpu_map__online();
	if (!cpu_map)
		return;

	nr_cpus = perf_cpu_map__nr(cpu_map);
	for (c = 0; c < nr_cpus; c++) {
		cpu = cpu_map->map[c]; /* map c index to online cpu index */
		if (cpu__get_node(cpu) == node)
			CPU_SET(cpu, mask);
	}
}

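/*
 * Compute the mask of CPUs the reading thread should run on while draining
 * this buffer: all CPUs of the buffer's node for PERF_AFFINITY_NODE, only
 * the mapped CPU for PERF_AFFINITY_CPU, and an empty mask (no rebinding)
 * for PERF_AFFINITY_SYS.
 */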
234 
235 static void perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
236 {
237 	CPU_ZERO(&map->affinity_mask);
238 	if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
239 		build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask);
240 	else if (mp->affinity == PERF_AFFINITY_CPU)
241 		CPU_SET(map->core.cpu, &map->affinity_mask);
242 }
243 
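/*
 * Map the perf event ring buffer for @fd and set up everything layered on
 * top of it: the affinity mask, flush threshold, optional compression
 * buffer, AUX area mapping and AIO state.
 */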
int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
{
	if (perf_mmap__mmap(&map->core, &mp->core, fd, cpu)) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		return -1;
	}

	perf_mmap__setup_affinity_mask(map, mp);

	map->core.flush = mp->flush;

	map->comp_level = mp->comp_level;

	/*
	 * When compressing without AIO, a separate buffer is needed to hold
	 * the compressed data before it is written out; with AIO the
	 * per-cblock aio.data buffers serve that role.
	 */
	if (map->comp_level && !perf_mmap__aio_enabled(map)) {
		map->data = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
				 MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
		if (map->data == MAP_FAILED) {
			pr_debug2("failed to mmap data buffer, error %d\n",
				  errno);
			map->data = NULL;
			return -1;
		}
	}

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->core.base, fd))
		return -1;

	return perf_mmap__aio_mmap(map, mp);
}

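/*
 * Drain all ready data from the ring buffer, calling push() for each
 * contiguous chunk; the data may wrap around the end of the buffer, in
 * which case push() is called twice. Returns 0 on success, 1 if there is
 * nothing to read (-EAGAIN from perf_mmap__read_init()) and -1 on error.
 *
 * A minimal caller sketch, assuming a hypothetical callback that writes
 * each chunk to a file descriptor passed through 'to':
 *
 *	static int fd_push(struct mmap *map, void *to, void *buf, size_t size)
 *	{
 *		return writen(*(int *)to, buf, size) < 0 ? -1 : 0;
 *	}
 *	...
 *	if (perf_mmap__push(map, &out_fd, fd_push) < 0)
 *		pr_err("failed to flush ring buffer\n");
 */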
int perf_mmap__push(struct mmap *md, void *to,
		    int push(struct mmap *map, void *to, void *buf, size_t size))
{
	u64 head = perf_mmap__read_head(&md->core);
	unsigned char *data = md->core.base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	rc = perf_mmap__read_init(&md->core);
	if (rc < 0)
		return (rc == -EAGAIN) ? 1 : -1;

	size = md->core.end - md->core.start;

	if ((md->core.start & md->core.mask) + size != (md->core.end & md->core.mask)) {
		/* The data wraps around the end of the buffer: push the tail segment first. */
		buf = &data[md->core.start & md->core.mask];
		size = md->core.mask + 1 - (md->core.start & md->core.mask);
		md->core.start += size;

		if (push(md, to, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	/* Push the (remaining) contiguous segment. */
	buf = &data[md->core.start & md->core.mask];
	size = md->core.end - md->core.start;
	md->core.start += size;

	if (push(md, to, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->core.prev = head;
	perf_mmap__consume(&md->core);
out:
	return rc;
}