Lines Matching defs:aio

337 		 * every aio write request started in record__aio_push() so
344 * aio write request may require restart with the
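The two fragments above are from the completion path: a finished request is reaped, and a partially written chunk is resubmitted for the remainder. A minimal self-contained sketch of that POSIX-aio pattern (the helper name aio_complete_or_restart is illustrative, not perf's; link with -lrt on glibc):

    #include <aio.h>
    #include <errno.h>
    #include <sys/types.h>

    /* Returns 1 when the request is fully done, 0 while it is still in
     * flight or was restarted with the remainder, -1 on error. */
    static int aio_complete_or_restart(struct aiocb *cb)
    {
    	ssize_t written;

    	if (aio_error(cb) == EINPROGRESS)
    		return 0;				/* still in flight */

    	written = aio_return(cb);			/* reap the result */
    	if (written < 0)
    		return -1;

    	if ((size_t)written == cb->aio_nbytes)
    		return 1;				/* whole chunk written */

    	/* Partial write: restart the request with the remainder. */
    	cb->aio_buf     = (volatile char *)cb->aio_buf + written;
    	cb->aio_nbytes -= written;
    	cb->aio_offset += written;
    	return aio_write(cb) == 0 ? 0 : -1;
    }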
360 struct aiocb **aiocb = md->aio.aiocb;
361 struct aiocb *cblocks = md->aio.cblocks;
367 for (i = 0; i < md->aio.nr_cblocks; ++i) {
375 * Started aio write is not complete yet
386 while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) {
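Lines 360-386 belong to record__aio_sync(), which scans the control blocks for a reusable slot and parks in aio_suspend() while every write is still in flight. A condensed sketch under the same POSIX-aio assumptions (wait_for_free_slot is a hypothetical name; a full version would also reap finished requests with aio_return(), as record__aio_complete() does):

    #include <aio.h>
    #include <errno.h>
    #include <stdio.h>
    #include <time.h>

    /* Return the index of a reusable control block, waiting with
     * aio_suspend() while every slot still has a write in flight. */
    static int wait_for_free_slot(struct aiocb *cblocks,
    			      const struct aiocb **list, int nr)
    {
    	struct timespec timeout = { 0, 1000 * 1000 }; /* 1ms */
    	int i, in_flight;

    	for (;;) {
    		in_flight = 0;
    		for (i = 0; i < nr; i++) {
    			if (cblocks[i].aio_fildes == -1 ||
    			    aio_error(&cblocks[i]) != EINPROGRESS)
    				return i;		/* slot i is free */
    			list[i] = &cblocks[i];		/* still in flight */
    			in_flight = 1;
    		}
    		if (!in_flight)
    			return -1;
    		/* Block until at least one request finishes, retrying on
    		 * EAGAIN (timeout) and EINTR (signal). */
    		while (aio_suspend(list, nr, &timeout)) {
    			if (errno != EAGAIN && errno != EINTR) {
    				perror("aio_suspend");
    				return -1;
    			}
    		}
    	}
    }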
401 struct record_aio *aio = to;
404 * map->core.base data pointed to by buf is copied into a free map->aio.data[] buffer
417 if (record__comp_enabled(aio->rec)) {
418 ssize_t compressed = zstd_compress(aio->rec->session, NULL, aio->data + aio->size,
419 mmap__mmap_len(map) - aio->size,
426 memcpy(aio->data + aio->size, buf, size);
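Lines 417-426 are the staging step of record__aio_pushfn(): the chunk taken off the kernel ring buffer is either compressed or copied verbatim into the aio buffer, appended at the current offset because the push loop may deliver a wrapping chunk in two pieces. A sketch of that compress-or-copy decision (struct staging and try_compress() are hypothetical stand-ins for perf's types and zstd_compress()):

    #include <string.h>
    #include <stdbool.h>
    #include <sys/types.h>

    struct staging { char *data; size_t size, cap; };

    /* Hypothetical stand-in for perf's zstd_compress(); returns the
     * compressed length or a negative error code. */
    ssize_t try_compress(void *dst, size_t dst_cap, void *src, size_t src_len);

    static int stage_chunk(struct staging *st, bool comp_enabled,
    		       void *buf, size_t size)
    {
    	if (comp_enabled) {
    		ssize_t compressed = try_compress(st->data + st->size,
    						  st->cap - st->size,
    						  buf, size);
    		if (compressed < 0)
    			return (int)compressed;	/* propagate the error */
    		size = compressed;
    	} else {
    		memcpy(st->data + st->size, buf, size);
    	}
    	st->size += size;	/* append: push may call this twice */
    	return (int)size;
    }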
429 if (!aio->size) {
431 * Increment map->refcount to guard map->aio.data[] buffer
433 * released before the aio write request started on the
434 * map->aio.data[] buffer has completed.
437 * after the started aio request completes, or at record__aio_push()
443 aio->size += size;
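Lines 429-443 pin the buffer's owner for the lifetime of the write: one reference is taken when the first chunk is staged, and it is released by the completion handler, or immediately if the write fails to start. A toy sketch of that pairing (buf_owner, owner_get() and owner_put() are hypothetical stand-ins for perf_mmap__get()/perf_mmap__put()):

    #include <stddef.h>

    /* Hypothetical refcounted buffer owner, standing in for struct mmap. */
    struct buf_owner { int refcnt; };
    static void owner_get(struct buf_owner *o) { o->refcnt++; }
    static void owner_put(struct buf_owner *o) { o->refcnt--; }

    /* Take the reference on the first staged chunk only, so the owner
     * stays alive for exactly the lifetime of one aio write. */
    static void stage_chunk_pinned(struct buf_owner *o, size_t *staged,
    			       size_t size)
    {
    	if (!*staged)
    		owner_get(o);	/* dropped on aio completion ... */
    	*staged += size;
    }

    static void on_submit_failure(struct buf_owner *o)
    {
    	owner_put(o);		/* ... or here, if the write never starts */
    }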
452 struct record_aio aio = { .rec = rec, .size = 0 };
455 * Call record__aio_sync() to wait until the map->aio.data[] buffer
456 * becomes available after the previous aio write operation.
460 aio.data = map->aio.data[idx];
461 ret = perf_mmap__push(map, &aio, record__aio_pushfn);
466 ret = record__aio_write(&(map->aio.cblocks[idx]), trace_fd, aio.data, aio.size, *off);
468 *off += aio.size;
469 rec->bytes_written += aio.size;
477 * aio write operation finishes successfully.
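Lines 452-477 are record__aio_push() itself: wait for a free control block, stage the data, start the asynchronous write, and account the bytes only after the submit succeeds. The submit-and-account step, sketched with POSIX aio (submit_staged is an illustrative name; perf's equivalent is record__aio_write()):

    #include <aio.h>
    #include <string.h>
    #include <sys/types.h>

    /* Submit the staged bytes as one asynchronous write at *off and
     * advance the offset only when the request was actually queued. */
    static int submit_staged(struct aiocb *cb, int fd, void *data,
    			 size_t size, off_t *off)
    {
    	memset(cb, 0, sizeof(*cb));
    	cb->aio_fildes = fd;
    	cb->aio_buf    = data;
    	cb->aio_nbytes = size;
    	cb->aio_offset = *off;

    	if (aio_write(cb))
    		return -1;	/* failed to start: caller drops its reference */

    	*off += size;		/* account the bytes once the write is queued */
    	return 0;
    }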
2972 if (!strcmp(var, "record.aio")) {
3527 OPT_CALLBACK_OPTARG(0, "aio", &record.opts,
4053 pr_err("Asynchronous streaming mode (--aio) is mutually exclusive with parallel streaming mode.\n");
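The last three hits wire the feature up: a record.aio perfconfig key, an --aio option with an optional control-block count, and a guard that rejects combining it with parallel streaming. Illustrative usage, assuming --threads is the parallel streaming mode the error message refers to (the count shown is an example; perf documents a default of 1 and caps the value at a small maximum):

    # ~/.perfconfig equivalent of passing --aio=4 on the command line
    [record]
    	aio = 4

    $ perf record --aio=4 -- sleep 1	# asynchronous trace writing
    $ perf record --aio --threads		# rejected: prints the error above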