Lines matching full:cqe in io_uring/io_uring.c

534 		struct io_uring_cqe *cqe;  in __io_cqring_overflow_flush()  local
540 if (ocqe->cqe.flags & IORING_CQE_F_32 || in __io_cqring_overflow_flush()
549 if (!io_get_cqe_overflow(ctx, &cqe, true, is_cqe32)) in __io_cqring_overflow_flush()
551 memcpy(cqe, &ocqe->cqe, cqe_size); in __io_cqring_overflow_flush()
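
These hits are __io_cqring_overflow_flush(): completions that found the CQ ring full were stashed on an overflow list, and this path copies them back once userspace frees slots. A simplified reconstruction of the loop around lines 534-551 (locking, tracing, and the dying-ring case omitted; a sketch, not the exact source):

    /* Sketch: drain stashed overflow CQEs back into the CQ ring.
     * Assumes ctx->completion_lock is held by the caller.
     */
    while (!list_empty(&ctx->cq_overflow_list)) {
        struct io_overflow_cqe *ocqe;
        struct io_uring_cqe *cqe;
        bool is_cqe32;
        size_t cqe_size;

        ocqe = list_first_entry(&ctx->cq_overflow_list,
                                struct io_overflow_cqe, list);
        /* a stashed 32b entry needs a 32b slot on replay */
        is_cqe32 = (ocqe->cqe.flags & IORING_CQE_F_32) ||
                   (ctx->flags & IORING_SETUP_CQE32);
        cqe_size = is_cqe32 ? 2 * sizeof(*cqe) : sizeof(*cqe);

        if (!io_get_cqe_overflow(ctx, &cqe, true, is_cqe32))
            break;          /* ring is full again; retry later */
        memcpy(cqe, &ocqe->cqe, cqe_size);
        list_del(&ocqe->list);
        kfree(ocqe);
    }
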
661 struct io_cqe *cqe, in io_alloc_ocqe() argument
668 if (cqe->flags & IORING_CQE_F_32 || ctx->flags & IORING_SETUP_CQE32) { in io_alloc_ocqe()
674 trace_io_uring_cqe_overflow(ctx, cqe->user_data, cqe->res, cqe->flags, ocqe); in io_alloc_ocqe()
676 ocqe->cqe.user_data = cqe->user_data; in io_alloc_ocqe()
677 ocqe->cqe.res = cqe->res; in io_alloc_ocqe()
678 ocqe->cqe.flags = cqe->flags; in io_alloc_ocqe()
680 ocqe->cqe.big_cqe[0] = big_cqe->extra1; in io_alloc_ocqe()
681 ocqe->cqe.big_cqe[1] = big_cqe->extra2; in io_alloc_ocqe()
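
io_alloc_ocqe() (lines 661-681) snapshots a completion into a heap node so it can ride out a full ring. The container type is not among the hits; in mainline it is a list head plus the saved CQE, with extra room appended when the entry is 32b wide, roughly:

    /* Assumed layout of the overflow node, consistent with how lines
     * 676-681 write into ocqe->cqe and ocqe->cqe.big_cqe[].
     */
    struct io_overflow_cqe {
        struct list_head    list;
        struct io_uring_cqe cqe;    /* ends in a flexible big_cqe[] array */
    };

    /* 32b entries need two extra u64s of storage after the 16b header */
    size_t len = sizeof(struct io_overflow_cqe);
    if (cqe->flags & IORING_CQE_F_32 || ctx->flags & IORING_SETUP_CQE32)
        len += 2 * sizeof(u64);
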
690 * Fill an empty dummy CQE, in case alignment is off for posting a 32b CQE
696 struct io_uring_cqe *cqe = &ctx->rings->cqes[off]; in io_fill_nop_cqe() local
698 cqe->user_data = 0; in io_fill_nop_cqe()
699 cqe->res = 0; in io_fill_nop_cqe()
700 cqe->flags = IORING_CQE_F_SKIP; in io_fill_nop_cqe()
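
io_fill_nop_cqe() (lines 690-700) writes a padding entry: zero user_data and res, and IORING_CQE_F_SKIP in flags so consumers discard it. Userspace reaping a mixed-size CQ ring directly would drop such entries with something like this hypothetical helper:

    /* Hypothetical userspace check: skip the kernel's alignment
     * padding entries when walking the CQ ring.
     */
    static inline bool cqe_is_padding(const struct io_uring_cqe *cqe)
    {
        return cqe->flags & IORING_CQE_F_SKIP;
    }
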
727 * Post dummy CQE if a 32b CQE is needed and there's only room for a in io_cqe_cache_refill()
728 * 16b CQE before the ring wraps. in io_cqe_cache_refill()
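
The comment at lines 727-728 explains when that padding entry is needed: on a mixed ring the CQ is carved into 16b slots, a 32b CQE must occupy two adjacent ones, and the pair cannot straddle the wrap point. The check itself is not among the hits; the arithmetic is presumably along these lines (field and variable names assumed):

    unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);

    /* a 32b CQE won't fit in the single 16b slot left before the
     * ring wraps, so burn that slot with a nop entry
     */
    if (cqe32 && off == ctx->cq_entries - 1) {
        io_fill_nop_cqe(ctx, off);
        ctx->cached_cq_tail++;
    }
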
757 struct io_uring_cqe *cqe; in io_fill_cqe_aux32() local
761 if (unlikely(!io_get_cqe(ctx, &cqe, true))) in io_fill_cqe_aux32()
764 memcpy(cqe, src_cqe, 2 * sizeof(*cqe)); in io_fill_cqe_aux32()
765 trace_io_uring_complete(ctx, NULL, cqe); in io_fill_cqe_aux32()
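
io_fill_cqe_aux32() (lines 757-765) takes a caller-built pair of 16b entries and copies them into the ring as one 32b completion. A sketch of how a caller might stage that pair (whether the caller or the helper sets IORING_CQE_F_32 is not visible in the hits, so treat the flag handling as an assumption):

    struct io_uring_cqe cqe[2] = {};

    cqe[0].user_data = user_data;
    cqe[0].res       = res;
    cqe[0].flags     = cflags | IORING_CQE_F_32;
    /* second half carries the big_cqe payload verbatim */
    memcpy(&cqe[1], extra_payload, sizeof(cqe[1]));

    io_fill_cqe_aux32(ctx, cqe);
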
773 struct io_uring_cqe *cqe; in io_fill_cqe_aux() local
775 if (likely(io_get_cqe(ctx, &cqe, cqe32))) { in io_fill_cqe_aux()
776 WRITE_ONCE(cqe->user_data, user_data); in io_fill_cqe_aux()
777 WRITE_ONCE(cqe->res, res); in io_fill_cqe_aux()
778 WRITE_ONCE(cqe->flags, cflags); in io_fill_cqe_aux()
781 WRITE_ONCE(cqe->big_cqe[0], 0); in io_fill_cqe_aux()
782 WRITE_ONCE(cqe->big_cqe[1], 0); in io_fill_cqe_aux()
785 trace_io_uring_complete(ctx, NULL, cqe); in io_fill_cqe_aux()
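
In io_fill_cqe_aux() (lines 773-785) the guard around the big_cqe zeroing falls between the hits: on an IORING_SETUP_CQE32 ring every slot is 32b, so a plain 16b completion has to clear the tail it does not use. A hypothetical reconstruction of the elided branch:

    /* assumed guard around lines 781-782: only an always-32b ring
     * posting a 16b payload needs the unused half zeroed
     */
    if (ctx->flags & IORING_SETUP_CQE32 && !cqe32) {
        WRITE_ONCE(cqe->big_cqe[0], 0);
        WRITE_ONCE(cqe->big_cqe[1], 0);
    }
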
796 static __cold void io_cqe_overflow(struct io_ring_ctx *ctx, struct io_cqe *cqe, in io_cqe_overflow() argument
801 ocqe = io_alloc_ocqe(ctx, cqe, big_cqe, GFP_KERNEL); in io_cqe_overflow()
808 struct io_cqe *cqe, in io_cqe_overflow_locked() argument
813 ocqe = io_alloc_ocqe(ctx, cqe, big_cqe, GFP_NOWAIT); in io_cqe_overflow_locked()
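
The two allocation flags mark the two calling contexts: io_cqe_overflow() (line 801) runs without the completion lock and may sleep under GFP_KERNEL, while io_cqe_overflow_locked() (line 813) is entered with the lock already held and must use GFP_NOWAIT. A sketch of the unlocked path under those assumptions (the real code also raises the ring's overflow flag and handles allocation failure):

    static void cqe_overflow_sketch(struct io_ring_ctx *ctx,
                                    struct io_cqe *cqe,
                                    struct io_big_cqe *big_cqe)
    {
        struct io_overflow_cqe *ocqe;

        /* no lock held yet, so a sleeping allocation is fine */
        ocqe = io_alloc_ocqe(ctx, cqe, big_cqe, GFP_KERNEL);
        if (!ocqe)
            return;     /* real code records the lost CQE instead */

        spin_lock(&ctx->completion_lock);
        list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
        spin_unlock(&ctx->completion_lock);
    }
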
824 struct io_cqe cqe = io_init_cqe(user_data, res, cflags); in io_post_aux_cqe() local
826 filled = io_cqe_overflow_locked(ctx, &cqe, NULL); in io_post_aux_cqe()
842 struct io_cqe cqe = io_init_cqe(user_data, res, cflags); in io_add_aux_cqe() local
844 io_cqe_overflow(ctx, &cqe, NULL); in io_add_aux_cqe()
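
io_post_aux_cqe() and io_add_aux_cqe() (lines 824-844) build a struct io_cqe on the stack via io_init_cqe() before trying the ring and falling back to the overflow path. Neither the struct nor the initializer is among the hits; a sketch consistent with how the hits use the fields (the union is inferred from req->cqe.fd at lines 1360 and 1801 below, where the slot holds the target fd until completion reuses it for flags):

    struct io_cqe {
        __u64   user_data;
        __s32   res;
        union {
            __u32   flags;
            int     fd;     /* valid only before completion */
        };
    };

    static inline struct io_cqe io_init_cqe(__u64 user_data, __s32 res,
                                            __u32 cflags)
    {
        return (struct io_cqe) { .user_data = user_data,
                                 .res = res, .flags = cflags };
    }
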
871 posted = io_fill_cqe_aux(ctx, req->cqe.user_data, res, cflags); in io_req_post_cqe()
874 posted = io_fill_cqe_aux(ctx, req->cqe.user_data, res, cflags); in io_req_post_cqe()
885 bool io_req_post_cqe32(struct io_kiocb *req, struct io_uring_cqe cqe[2]) in io_req_post_cqe32()
893 cqe[0].user_data = req->cqe.user_data; in io_req_post_cqe32()
896 posted = io_fill_cqe_aux32(ctx, cqe); in io_req_post_cqe32()
899 posted = io_fill_cqe_aux32(ctx, cqe); in io_req_post_cqe32()
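
The duplicated fill calls in io_req_post_cqe() (lines 871/874) and io_req_post_cqe32() (lines 896/899) are two locking branches, not a retry: one runs under ctx->completion_lock, the other relies on the ring's existing serialization. A hypothetical reconstruction (the lockless_cq field name is an assumption):

    if (!ctx->lockless_cq) {
        spin_lock(&ctx->completion_lock);
        posted = io_fill_cqe_aux32(ctx, cqe);
        spin_unlock(&ctx->completion_lock);
    } else {
        posted = io_fill_cqe_aux32(ctx, cqe);
    }
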
1036 io_req_defer_failed(req, req->cqe.res); in io_req_task_cancel()
1140 * Requests marked with REQUEUE should not post a CQE, they in __io_submit_flush_completions()
1147 io_cqe_overflow(ctx, &req->cqe, &req->big_cqe); in __io_submit_flush_completions()
1149 io_cqe_overflow_locked(ctx, &req->cqe, &req->big_cqe); in __io_submit_flush_completions()
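
In __io_submit_flush_completions() (lines 1140-1151), requests flagged for requeue skip CQE posting entirely, and completions that miss the ring pick the overflow variant matching the locking context: io_cqe_overflow() takes the completion lock itself, io_cqe_overflow_locked() assumes it is held. Presumably something like (flag names and branch condition assumed):

    if (!(req->flags & (REQ_F_CQE_SKIP | REQ_F_REQUEUE)) &&
        !io_fill_cqe_req(ctx, req)) {
        if (ctx->lockless_cq)
            io_cqe_overflow(ctx, &req->cqe, &req->big_cqe);
        else
            io_cqe_overflow_locked(ctx, &req->cqe, &req->big_cqe);
    }
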
1213 * dropped CQE. in io_iopoll_check()
1221 * already triggered a CQE (eg in error). in io_iopoll_check()
1360 req->file = io_file_get_fixed(req, req->cqe.fd, issue_flags); in io_assign_file()
1362 req->file = io_file_get_normal(req, req->cqe.fd); in io_assign_file()
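
io_assign_file() (lines 1360-1362) resolves req->cqe.fd either through the registered (fixed) file table or through a normal fd lookup. The userspace half of that split, using liburing: register a table once, then reference entries by index with IOSQE_FIXED_FILE (a sketch; ring, sockfd, filefd, buf, and len are assumed to exist):

    int fds[] = { sockfd, filefd };
    struct io_uring_sqe *sqe;

    io_uring_register_files(&ring, fds, 2);

    sqe = io_uring_get_sqe(&ring);
    io_uring_prep_read(sqe, 1 /* table index, not an fd */, buf, len, 0);
    sqe->flags |= IOSQE_FIXED_FILE;
    io_uring_submit(&ring);
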
1656 io_req_defer_failed(req, req->cqe.res); in io_queue_sqe_fallback()
1731 req->cqe.user_data = READ_ONCE(sqe->user_data); in io_init_req()
1801 req->cqe.fd = READ_ONCE(sqe->fd); in io_init_req()
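
io_init_req() copies user_data (line 1731) and fd (line 1801) straight out of the SQE; the user_data value is what every fill helper above writes back into the CQE untouched. The round trip as seen from userspace with liburing (ring assumed initialized):

    struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
    struct io_uring_cqe *cqe;

    io_uring_prep_nop(sqe);
    io_uring_sqe_set_data64(sqe, 0xdeadbeefULL);
    io_uring_submit(&ring);

    io_uring_wait_cqe(&ring, &cqe);
    assert(io_uring_cqe_get_data64(cqe) == 0xdeadbeefULL);
    io_uring_cqe_seen(&ring, cqe);
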
2682 * EBADR indicates that one or more CQE were dropped. in SYSCALL_DEFINE6()
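
The comment at line 2682 documents the io_uring_enter() contract: -EBADR means at least one CQE was dropped (typically because an overflow allocation failed), so completion accounting can no longer be trusted. A liburing caller sees it as a negative return (handle_lost_cqes is a hypothetical recovery hook):

    int ret = io_uring_wait_cqe(&ring, &cqe);

    if (ret == -EBADR) {
        /* at least one completion was lost; reconcile in-flight state */
        handle_lost_cqes();
    }
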
2831 * Nonsensical to ask for CQE32 and mixed CQE support, it's not in io_uring_sanitise_params()
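
Line 2831 rejects combining always-32b CQEs with mixed-size CQEs at ring setup. The check presumably reads like this (the mixed-mode flag name is an assumption, not confirmed by the hits):

    if ((p->flags & IORING_SETUP_CQE32) && (p->flags & IORING_SETUP_CQE_MIXED))
        return -EINVAL;
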