Lines matching full:map in tools/perf/util/mmap.c

39 size_t mmap__mmap_len(struct mmap *map)
41 return perf_mmap__mmap_len(&map->core);
71 static int perf_mmap__aio_enabled(struct mmap *map)
73 return map->aio.nr_cblocks > 0;
77 static int perf_mmap__aio_alloc(struct mmap *map, int idx)
79 map->aio.data[idx] = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
81 if (map->aio.data[idx] == MAP_FAILED) {
82 map->aio.data[idx] = NULL;
89 static void perf_mmap__aio_free(struct mmap *map, int idx)
91 if (map->aio.data[idx]) {
92 munmap(map->aio.data[idx], mmap__mmap_len(map));
93 map->aio.data[idx] = NULL;
97 static int perf_mmap__aio_bind(struct mmap *map, int idx, struct perf_cpu cpu, int affinity)
106 data = map->aio.data[idx];
107 mmap_len = mmap__mmap_len(map);
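
The fragments at lines 77-107 are the HAVE_LIBNUMA_SUPPORT build: each AIO block gets an anonymous mapping so it can later be bound to the NUMA node of the traced CPU. A sketch of how the matched lines plausibly fit together; the mmap() flags, return values, affinity guard, and mbind() call are filled-in assumptions, not shown by the matches:

    static int perf_mmap__aio_alloc(struct mmap *map, int idx)
    {
            /* Anonymous mapping, so perf_mmap__aio_bind() can mbind() it below. */
            map->aio.data[idx] = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
                                      MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
            if (map->aio.data[idx] == MAP_FAILED) {
                    map->aio.data[idx] = NULL;
                    return -1;
            }
            return 0;
    }

    static int perf_mmap__aio_bind(struct mmap *map, int idx, struct perf_cpu cpu, int affinity)
    {
            unsigned long *node_mask, node_index;
            void *data;
            size_t mmap_len;
            int err = 0;

            if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {
                    data = map->aio.data[idx];
                    mmap_len = mmap__mmap_len(map);
                    node_index = cpu__get_node(cpu);
                    node_mask = bitmap_zalloc(node_index + 1);
                    if (!node_mask)
                            return -1;
                    __set_bit(node_index, node_mask);
                    /* Bind the staging buffer to the node of the mmapped CPU. */
                    if (mbind(data, mmap_len, MPOL_BIND, node_mask, node_index + 1 + 1, 0))
                            err = -1;
                    bitmap_free(node_mask);
            }
            return err;
    }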
126 static int perf_mmap__aio_alloc(struct mmap *map, int idx)
128 map->aio.data[idx] = malloc(mmap__mmap_len(map));
129 if (map->aio.data[idx] == NULL)
135 static void perf_mmap__aio_free(struct mmap *map, int idx)
137 zfree(&(map->aio.data[idx]));
140 static int perf_mmap__aio_bind(struct mmap *map __maybe_unused, int idx __maybe_unused,
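
Lines 126-140 are the fallback when libnuma is unavailable: plain heap allocation and a no-op bind. A sketch with the elided bodies filled in (the #else/#endif placement and return values are assumptions consistent with the call sites):

    #else /* !HAVE_LIBNUMA_SUPPORT */
    static int perf_mmap__aio_alloc(struct mmap *map, int idx)
    {
            map->aio.data[idx] = malloc(mmap__mmap_len(map));
            if (map->aio.data[idx] == NULL)
                    return -1;
            return 0;
    }

    static void perf_mmap__aio_free(struct mmap *map, int idx)
    {
            zfree(&(map->aio.data[idx]));
    }

    static int perf_mmap__aio_bind(struct mmap *map __maybe_unused, int idx __maybe_unused,
                                   struct perf_cpu cpu __maybe_unused, int affinity __maybe_unused)
    {
            return 0;       /* nothing to bind without NUMA awareness */
    }
    #endif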
147 static int perf_mmap__aio_mmap(struct mmap *map, struct mmap_params *mp)
151 map->aio.nr_cblocks = mp->nr_cblocks;
152 if (map->aio.nr_cblocks) {
153 map->aio.aiocb = calloc(map->aio.nr_cblocks, sizeof(struct aiocb *));
154 if (!map->aio.aiocb) {
158 map->aio.cblocks = calloc(map->aio.nr_cblocks, sizeof(struct aiocb));
159 if (!map->aio.cblocks) {
163 map->aio.data = calloc(map->aio.nr_cblocks, sizeof(void *));
164 if (!map->aio.data) {
169 for (i = 0; i < map->aio.nr_cblocks; ++i) {
170 ret = perf_mmap__aio_alloc(map, i);
175 ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity);
184 map->aio.cblocks[i].aio_fildes = -1;
194 map->aio.cblocks[i].aio_reqprio = prio >= 0 ? prio : 0;
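
perf_mmap__aio_mmap() (lines 147-194) allocates three parallel arrays sized by nr_cblocks (the aiocb pointer array, the aiocb control blocks, and the data buffers), then initializes each block. A condensed, assumption-laden view of the per-block loop; the error unwinding and the exact getpriority() call are inferred, not matched:

    int i, prio, ret;

    for (i = 0; i < map->aio.nr_cblocks; ++i) {
            ret = perf_mmap__aio_alloc(map, i);             /* per-block staging buffer */
            if (ret == -1)
                    return -1;
            ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity);
            if (ret == -1)
                    return -1;
            map->aio.cblocks[i].aio_fildes = -1;            /* -1: no write in flight   */
            prio = getpriority(PRIO_PROCESS, 0);            /* assumed source of prio   */
            map->aio.cblocks[i].aio_reqprio = prio >= 0 ? prio : 0;
    }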
201 static void perf_mmap__aio_munmap(struct mmap *map)
205 for (i = 0; i < map->aio.nr_cblocks; ++i)
206 perf_mmap__aio_free(map, i);
207 if (map->aio.data)
208 zfree(&map->aio.data);
209 zfree(&map->aio.cblocks);
210 zfree(&map->aio.aiocb);
213 static int perf_mmap__aio_enabled(struct mmap *map __maybe_unused)
218 static int perf_mmap__aio_mmap(struct mmap *map __maybe_unused,
224 static void perf_mmap__aio_munmap(struct mmap *map __maybe_unused)
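
The __maybe_unused stubs at lines 213-224 are the build without AIO support, so callers never need #ifdefs around perf_mmap__aio_*(). Their bodies are not matched here; plausible no-op definitions consistent with the call sites would be:

    #ifndef HAVE_AIO_SUPPORT
    static int perf_mmap__aio_enabled(struct mmap *map __maybe_unused)
    {
            return 0;       /* AIO never enabled in this build */
    }

    static int perf_mmap__aio_mmap(struct mmap *map __maybe_unused,
                                   struct mmap_params *mp __maybe_unused)
    {
            return 0;       /* nothing to set up */
    }

    static void perf_mmap__aio_munmap(struct mmap *map __maybe_unused)
    {
    }
    #endif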
229 void mmap__munmap(struct mmap *map)
231 bitmap_free(map->affinity_mask.bits);
233 zstd_fini(&map->zstd_data);
235 perf_mmap__aio_munmap(map);
236 if (map->data != NULL) {
237 munmap(map->data, mmap__mmap_len(map));
238 map->data = NULL;
240 auxtrace_mmap__munmap(&map->auxtrace_mmap);
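
Read together, the mmap__munmap() matches (lines 229-240) undo mmap__mmap() piece by piece. The same lines with editorial comments added (the comments are annotations, not from the source):

    void mmap__munmap(struct mmap *map)
    {
            bitmap_free(map->affinity_mask.bits);           /* affinity mask            */
            zstd_fini(&map->zstd_data);                     /* compression state        */
            perf_mmap__aio_munmap(map);                     /* AIO cblocks and buffers  */
            if (map->data != NULL) {                        /* non-AIO compression buf  */
                    munmap(map->data, mmap__mmap_len(map));
                    map->data = NULL;
            }
            auxtrace_mmap__munmap(&map->auxtrace_mmap);     /* AUX area last            */
    }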
254 cpu = perf_cpu_map__cpu(cpu_map, idx); /* map cpu_map index to online cpu index */
261 static int perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
263 map->affinity_mask.nbits = cpu__max_cpu().cpu;
264 map->affinity_mask.bits = bitmap_zalloc(map->affinity_mask.nbits);
265 if (!map->affinity_mask.bits)
269 build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask);
271 __set_bit(map->core.cpu.cpu, map->affinity_mask.bits);
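
perf_mmap__setup_affinity_mask() (lines 261-271) sizes a bitmap by cpu__max_cpu() and then either builds a node-wide mask or sets the single CPU bit. The branch conditions are elided by the search; a sketch assuming they key off mp->affinity using perf's PERF_AFFINITY_NODE/PERF_AFFINITY_CPU modes:

    static int perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
    {
            map->affinity_mask.nbits = cpu__max_cpu().cpu;
            map->affinity_mask.bits = bitmap_zalloc(map->affinity_mask.nbits);
            if (!map->affinity_mask.bits)
                    return -1;

            if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)     /* assumed */
                    build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask);
            else if (mp->affinity == PERF_AFFINITY_CPU)                        /* assumed */
                    __set_bit(map->core.cpu.cpu, map->affinity_mask.bits);

            return 0;
    }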
276 int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, struct perf_cpu cpu)
278 if (perf_mmap__mmap(&map->core, &mp->core, fd, cpu)) {
285 perf_mmap__setup_affinity_mask(map, mp)) {
292 mmap_cpu_mask__scnprintf(&map->affinity_mask, "mmap");
294 map->core.flush = mp->flush;
296 if (zstd_init(&map->zstd_data, mp->comp_level)) {
301 if (mp->comp_level && !perf_mmap__aio_enabled(map)) {
302 map->data = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
304 if (map->data == MAP_FAILED) {
307 map->data = NULL;
312 if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
313 &mp->auxtrace_mp, map->core.base, fd))
316 return perf_mmap__aio_mmap(map, mp);
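
mmap__mmap() (lines 276-316) is the setup path that the teardown above mirrors: core ring buffer, optional affinity mask, flush threshold, zstd state, an extra compression buffer when compressing without AIO, the AUX area, and finally the AIO blocks. A condensed sketch; the PERF_AFFINITY_SYS guard, the mmap() flags, and the bare -1 returns in place of the file's logged error paths are assumptions:

    int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, struct perf_cpu cpu)
    {
            if (perf_mmap__mmap(&map->core, &mp->core, fd, cpu))
                    return -1;
            if (mp->affinity != PERF_AFFINITY_SYS &&        /* assumed guard */
                perf_mmap__setup_affinity_mask(map, mp))
                    return -1;
            mmap_cpu_mask__scnprintf(&map->affinity_mask, "mmap");
            map->core.flush = mp->flush;
            if (zstd_init(&map->zstd_data, mp->comp_level))
                    return -1;
            if (mp->comp_level && !perf_mmap__aio_enabled(map)) {
                    map->data = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
                                     MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);  /* assumed flags */
                    if (map->data == MAP_FAILED) {
                            map->data = NULL;
                            return -1;
                    }
            }
            if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
                                    &mp->auxtrace_mp, map->core.base, fd))
                    return -1;
            return perf_mmap__aio_mmap(map, mp);            /* AIO blocks last */
    }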
320 int push(struct mmap *map, void *to, void *buf, size_t size))
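
The final match (line 320) is the callback parameter of perf_mmap__push(), which drains the ring buffer by invoking push() on each readable chunk; a negative return aborts the drain. A hypothetical conforming callback (my_push and the fd-in-'to' convention are illustrative, not from the source; in perf itself, record__pushfn in builtin-record.c plays this role):

    #include <unistd.h>

    static int my_push(struct mmap *map __maybe_unused, void *to, void *buf, size_t size)
    {
            int *fdp = to;          /* 'to' is opaque caller state passed through */
            char *p = buf;

            while (size) {
                    ssize_t n = write(*fdp, p, size);

                    if (n < 0)
                            return -1;
                    p += n;
                    size -= n;
            }
            return 0;               /* non-negative: keep draining */
    }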