// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */

#include <sys/mman.h>
#include <errno.h>
#include <inttypes.h>
#include <asm/bug.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h> // sysconf()
#include <perf/mmap.h>
#ifdef HAVE_LIBNUMA_SUPPORT
#include <numaif.h>
#endif
#include "cpumap.h"
#include "debug.h"
#include "event.h"
#include "mmap.h"
#include "../perf.h"
#include <internal/lib.h> /* page_size */
#include <linux/bitmap.h>

#define MASK_SIZE 1023
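/*
 * Print @mask's bitmap as a hex string to the debug log, prefixed with @tag,
 * truncating the output to MASK_SIZE characters.
 */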
void mmap_cpu_mask__scnprintf(struct mmap_cpu_mask *mask, const char *tag)
{
	char buf[MASK_SIZE + 1];
	size_t len;

	len = bitmap_scnprintf(mask->bits, mask->nbits, buf, MASK_SIZE);
	buf[len] = '\0';
	pr_debug("%p: %s mask[%zd]: %s\n", mask, tag, mask->nbits, buf);
}

size_t mmap__mmap_len(struct mmap *map)
{
	return perf_mmap__mmap_len(&map->core);
}

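/*
 * Weak default stubs for the auxtrace mmap hooks; the real implementations
 * in util/auxtrace.c override these when auxtrace support is compiled in.
 */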
int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused,
				       off_t auxtrace_offset __maybe_unused,
				       unsigned int auxtrace_pages __maybe_unused,
				       bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
					  struct evlist *evlist __maybe_unused,
					  struct evsel *evsel __maybe_unused,
					  int idx __maybe_unused)
{
}

#ifdef HAVE_AIO_SUPPORT
static int perf_mmap__aio_enabled(struct mmap *map)
{
	return map->aio.nr_cblocks > 0;
}

#ifdef HAVE_LIBNUMA_SUPPORT
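/*
 * With libnuma, allocate the AIO data buffers with mmap() so that the pages
 * can later be bound to a NUMA node via mbind() in perf_mmap__aio_bind().
 */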
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
	map->aio.data[idx] = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
				  MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
	if (map->aio.data[idx] == MAP_FAILED) {
		map->aio.data[idx] = NULL;
		return -1;
	}

	return 0;
}

static void perf_mmap__aio_free(struct mmap *map, int idx)
{
	if (map->aio.data[idx]) {
		munmap(map->aio.data[idx], mmap__mmap_len(map));
		map->aio.data[idx] = NULL;
	}
}

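/*
 * Bind the AIO data buffer to the NUMA node of @cpu, so that asynchronous
 * writes work on node-local memory. A no-op for the default system affinity
 * or when there is only one node.
 */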
static int perf_mmap__aio_bind(struct mmap *map, int idx, struct perf_cpu cpu, int affinity)
{
	void *data;
	size_t mmap_len;
	unsigned long *node_mask;
	unsigned long node_index;
	int err = 0;

	if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {
		data = map->aio.data[idx];
		mmap_len = mmap__mmap_len(map);
		node_index = cpu__get_node(cpu);
		node_mask = bitmap_zalloc(node_index + 1);
		if (!node_mask) {
			pr_err("Failed to allocate node mask for mbind: error %m\n");
			return -1;
		}
		__set_bit(node_index, node_mask);
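		/*
		 * The mask holds node_index + 1 bits, but mbind() is passed
		 * node_index + 1 + 1 as maxnode since the kernel copies one
		 * bit less than maxnode (see get_nodes() in mm/mempolicy.c).
		 */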
		if (mbind(data, mmap_len, MPOL_BIND, node_mask, node_index + 1 + 1, 0)) {
			pr_err("Failed to bind [%p-%p] AIO buffer to node %lu: error %m\n",
			       data, data + mmap_len, node_index);
			err = -1;
		}
		bitmap_free(node_mask);
	}

	return err;
}
#else /* !HAVE_LIBNUMA_SUPPORT */
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
	map->aio.data[idx] = malloc(mmap__mmap_len(map));
	if (map->aio.data[idx] == NULL)
		return -1;

	return 0;
}

static void perf_mmap__aio_free(struct mmap *map, int idx)
{
	zfree(&(map->aio.data[idx]));
}

static int perf_mmap__aio_bind(struct mmap *map __maybe_unused, int idx __maybe_unused,
			       struct perf_cpu cpu __maybe_unused, int affinity __maybe_unused)
{
	return 0;
}
#endif

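/*
 * Set up per-mmap AIO state: nr_cblocks control blocks, each with its own
 * data buffer, allocated and NUMA-bound according to the requested affinity.
 */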
static int perf_mmap__aio_mmap(struct mmap *map, struct mmap_params *mp)
{
	int delta_max, i, prio, ret;

	map->aio.nr_cblocks = mp->nr_cblocks;
	if (map->aio.nr_cblocks) {
		map->aio.aiocb = calloc(map->aio.nr_cblocks, sizeof(struct aiocb *));
		if (!map->aio.aiocb) {
			pr_debug2("failed to allocate aiocb for data buffer, error %m\n");
			return -1;
		}
		map->aio.cblocks = calloc(map->aio.nr_cblocks, sizeof(struct aiocb));
		if (!map->aio.cblocks) {
			pr_debug2("failed to allocate cblocks for data buffer, error %m\n");
			return -1;
		}
		map->aio.data = calloc(map->aio.nr_cblocks, sizeof(void *));
		if (!map->aio.data) {
			pr_debug2("failed to allocate data buffer, error %m\n");
			return -1;
		}
		delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);
		for (i = 0; i < map->aio.nr_cblocks; ++i) {
			ret = perf_mmap__aio_alloc(map, i);
			if (ret == -1) {
				pr_debug2("failed to allocate data buffer area, error %m\n");
				return -1;
			}
			ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity);
			if (ret == -1)
				return -1;
			/*
			 * Use a cblock.aio_fildes value different from -1
			 * to denote a started aio write operation on the
			 * cblock, so an explicit record__aio_sync() call is
			 * required before the cblock can be reused.
			 */
			map->aio.cblocks[i].aio_fildes = -1;
			/*
			 * Allocate cblocks with decreasing priority deltas
			 * to get faster aio write system calls: queued
			 * requests are kept in separate per-priority queues,
			 * so adding a new request iterates over a shorter
			 * per-priority list. Blocks with indexes higher than
			 * _SC_AIO_PRIO_DELTA_MAX get priority 0.
			 */
			prio = delta_max - i;
			map->aio.cblocks[i].aio_reqprio = prio >= 0 ? prio : 0;
		}
	}

	return 0;
}

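/* Free the AIO data buffers and control blocks set up by perf_mmap__aio_mmap(). */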
static void perf_mmap__aio_munmap(struct mmap *map)
{
	int i;

	for (i = 0; i < map->aio.nr_cblocks; ++i)
		perf_mmap__aio_free(map, i);
	if (map->aio.data)
		zfree(&map->aio.data);
	zfree(&map->aio.cblocks);
	zfree(&map->aio.aiocb);
}
#else /* !HAVE_AIO_SUPPORT */
static int perf_mmap__aio_enabled(struct mmap *map __maybe_unused)
{
	return 0;
}

static int perf_mmap__aio_mmap(struct mmap *map __maybe_unused,
			       struct mmap_params *mp __maybe_unused)
{
	return 0;
}

static void perf_mmap__aio_munmap(struct mmap *map __maybe_unused)
{
}
#endif

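/*
 * Tear down everything attached to @map: the affinity mask, the compressor
 * state, the AIO buffers, the compression data buffer and the auxtrace area.
 */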
void mmap__munmap(struct mmap *map)
{
	bitmap_free(map->affinity_mask.bits);

	zstd_fini(&map->zstd_data);

	perf_mmap__aio_munmap(map);
	if (map->data != NULL) {
		munmap(map->data, mmap__mmap_len(map));
		map->data = NULL;
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}

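/* Set a bit in @mask for every online CPU belonging to NUMA node @node. */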
static void build_node_mask(int node, struct mmap_cpu_mask *mask)
{
	int idx, nr_cpus;
	struct perf_cpu cpu;
	struct perf_cpu_map *cpu_map = cpu_map__online();

	if (!cpu_map)
		return;

	nr_cpus = perf_cpu_map__nr(cpu_map);
	for (idx = 0; idx < nr_cpus; idx++) {
		cpu = perf_cpu_map__cpu(cpu_map, idx); /* map the index to an online cpu */
		if (cpu__get_node(cpu) == node)
			__set_bit(cpu.cpu, mask->bits);
	}
	perf_cpu_map__put(cpu_map);
}

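/*
 * Allocate @map's affinity mask and fill it: all CPUs of the mmap'ed CPU's
 * node for PERF_AFFINITY_NODE, just the mmap'ed CPU for PERF_AFFINITY_CPU.
 */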
static int perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
{
	map->affinity_mask.nbits = cpu__max_cpu().cpu;
	map->affinity_mask.bits = bitmap_zalloc(map->affinity_mask.nbits);
	if (!map->affinity_mask.bits)
		return -1;

	if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
		build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask);
	else if (mp->affinity == PERF_AFFINITY_CPU)
		__set_bit(map->core.cpu.cpu, map->affinity_mask.bits);

	return 0;
}

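/*
 * mmap the perf event ring buffer on @fd and set up what is layered on top
 * of it: the affinity mask, the compressor, an optional compression buffer,
 * the auxtrace area and the AIO state.
 */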
int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, struct perf_cpu cpu)
{
	if (perf_mmap__mmap(&map->core, &mp->core, fd, cpu)) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		return -1;
	}

	if (mp->affinity != PERF_AFFINITY_SYS &&
	    perf_mmap__setup_affinity_mask(map, mp)) {
		pr_debug2("failed to alloc mmap affinity mask, error %d\n",
			  errno);
		return -1;
	}

	if (verbose == 2)
		mmap_cpu_mask__scnprintf(&map->affinity_mask, "mmap");

	map->core.flush = mp->flush;

	if (zstd_init(&map->zstd_data, mp->comp_level)) {
		pr_debug2("failed to init mmap compressor, error %d\n", errno);
		return -1;
	}

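	/*
	 * When compressing without AIO, records get compressed into this
	 * separate buffer before being written out.
	 */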
	if (mp->comp_level && !perf_mmap__aio_enabled(map)) {
		map->data = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
				 MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
		if (map->data == MAP_FAILED) {
			pr_debug2("failed to mmap data buffer, error %d\n",
				  errno);
			map->data = NULL;
			return -1;
		}
	}

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->core.base, fd))
		return -1;

	return perf_mmap__aio_mmap(map, mp);
}

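/*
 * Drain the ring buffer, handing each contiguous chunk of new data to @push.
 * Since the buffer is circular, the data may wrap around its end, in which
 * case up to two push() calls are needed.
 */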
int perf_mmap__push(struct mmap *md, void *to,
		    int push(struct mmap *map, void *to, void *buf, size_t size))
{
	u64 head = perf_mmap__read_head(&md->core);
	unsigned char *data = md->core.base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	rc = perf_mmap__read_init(&md->core);
	if (rc < 0)
		return (rc == -EAGAIN) ? 1 : -1;

	size = md->core.end - md->core.start;

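	/* The region wraps past the end of the ring buffer: push the tail chunk first. */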
	if ((md->core.start & md->core.mask) + size != (md->core.end & md->core.mask)) {
		buf = &data[md->core.start & md->core.mask];
		size = md->core.mask + 1 - (md->core.start & md->core.mask);
		md->core.start += size;

		if (push(md, to, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[md->core.start & md->core.mask];
	size = md->core.end - md->core.start;
	md->core.start += size;

	if (push(md, to, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->core.prev = head;
	perf_mmap__consume(&md->core);
out:
	return rc;
}
