Lines Matching defs:ev_fd
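(Context: these matches appear to come from the Linux kernel's io_uring eventfd notification code, io_uring/eventfd.c. Each leading number is the line's position in that file; only matching lines are shown, so the code between them is elided.)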
30         struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu);
32         eventfd_ctx_put(ev_fd->cq_ev_fd);
33         kfree(ev_fd);
36 static void io_eventfd_put(struct io_ev_fd *ev_fd)
38         if (refcount_dec_and_test(&ev_fd->refs))
39                 call_rcu(&ev_fd->rcu, io_eventfd_free);
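Lines 36-39 are the lifetime pattern for io_ev_fd: dropping the last reference does not free the object synchronously but hands it to call_rcu(), so lockless readers that looked the pointer up under RCU (line 84 below) can never touch freed memory. A minimal userspace sketch of the same drop-last-ref-then-defer shape, using C11 atomics; the names, and the plain free() standing in for the grace-period wait, are illustrative, not kernel API:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
        atomic_int refs;
        /* payload would live here */
};

/* Stand-in for call_rcu(): the kernel defers this free until all
 * pre-existing RCU readers have finished; here it is immediate. */
static void obj_free(struct obj *o)
{
        free(o);
}

static void obj_put(struct obj *o)
{
        /* fetch_sub returns the old value, so 1 means this caller
         * dropped the final reference (cf. refcount_dec_and_test()). */
        if (atomic_fetch_sub(&o->refs, 1) == 1)
                obj_free(o);
}

int main(void)
{
        struct obj *o = malloc(sizeof(*o));

        atomic_init(&o->refs, 2);
        obj_put(o);     /* one reference remains, object survives */
        obj_put(o);     /* last reference, object is freed */
        puts("done");
        return 0;
}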
44         struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu);
46         eventfd_signal_mask(ev_fd->cq_ev_fd, EPOLL_URING_WAKE);
47         io_eventfd_put(ev_fd);
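Lines 44-47 are the deferred-signal path: when __io_eventfd_signal() below cannot signal from the current context, this RCU callback performs the eventfd_signal_mask() later and then drops the reference the signaler took. The EPOLL_URING_WAKE flag tags the wakeup as originating from io_uring so the eventfd/epoll wakeup paths can break potential recursion when the eventfd is itself being waited on through io_uring.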
51  * Returns true if the caller should put the ev_fd reference, false if not.
53 static bool __io_eventfd_signal(struct io_ev_fd *ev_fd)
56                 eventfd_signal_mask(ev_fd->cq_ev_fd, EPOLL_URING_WAKE);
59         if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_SIGNAL_BIT), &ev_fd->ops)) {
60                 call_rcu_hurry(&ev_fd->rcu, io_eventfd_do_signal);
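The atomic_fetch_or() on line 59 is a one-bit claim: it returns the old ops word, so only the caller that flips IO_EVENTFD_OP_SIGNAL_BIT from 0 to 1 queues the RCU callback, and every signaler racing in behind it collapses into that single pending signal. The same idiom in portable C11 (illustrative names, not the kernel's):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define SIGNAL_BIT (1u << 0)    /* cf. BIT(IO_EVENTFD_OP_SIGNAL_BIT) */

static atomic_uint ops;

/* True only for the caller that flips the bit 0 -> 1; later callers
 * see it already set and know a signal is already pending. */
static bool claim_signal(void)
{
        return (atomic_fetch_or(&ops, SIGNAL_BIT) & SIGNAL_BIT) == 0;
}

int main(void)
{
        printf("%d\n", claim_signal());  /* 1: this caller queues the work */
        printf("%d\n", claim_signal());  /* 0: a signal is already pending */
        return 0;
}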
70 static bool io_eventfd_trigger(struct io_ev_fd *ev_fd)
72         return !ev_fd->eventfd_async || io_wq_current_is_worker();
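Line 72 is the async-only policy check: if the eventfd was registered with eventfd_async set (line 146 below), only completions posted from an io-wq worker, i.e. genuinely async work, signal the eventfd; inline completions from the submitting task stay silent. liburing exposes the two registration modes as io_uring_register_eventfd() and io_uring_register_eventfd_async().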
78         struct io_ev_fd *ev_fd;
84         ev_fd = rcu_dereference(ctx->io_ev_fd);
86          * Check again if ev_fd exists in case an io_eventfd_unregister call
90         if (!ev_fd)
92         if (!io_eventfd_trigger(ev_fd) || !refcount_inc_not_zero(&ev_fd->refs))
106                 skip = ctx->cached_cq_tail == ev_fd->last_cq_tail;
107                 ev_fd->last_cq_tail = ctx->cached_cq_tail;
111         if (skip || __io_eventfd_signal(ev_fd))
112                 io_eventfd_put(ev_fd);
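Lines 106-107 (taken under the completion lock) make the notification conditional on CQ progress: the signal is skipped unless cached_cq_tail moved past the tail recorded at the previous signal, so the eventfd count only changes when at least one new CQE has actually been posted. Line 111 then drops the reference taken on line 92, either because the signal was skipped or because __io_eventfd_signal() reported the caller should put it. A small sketch of the signal-only-on-tail-progress check, with a pthread mutex standing in for the kernel spinlock (illustrative names):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int cq_tail;       /* bumped whenever a CQE is posted */
static unsigned int last_cq_tail;  /* tail value seen at the last signal */

/* True when the tail advanced since the previous notification,
 * mirroring the skip test on lines 106-107. */
static bool should_signal(void)
{
        bool skip;

        pthread_mutex_lock(&lock);
        skip = cq_tail == last_cq_tail;
        last_cq_tail = cq_tail;
        pthread_mutex_unlock(&lock);
        return !skip;
}

int main(void)
{
        cq_tail = 1;
        printf("%d\n", should_signal());  /* 1: a new CQE was posted */
        printf("%d\n", should_signal());  /* 0: no CQ progress, skip */
        return 0;
}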
118         struct io_ev_fd *ev_fd;
122         ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
124         if (ev_fd)
130         ev_fd = kmalloc(sizeof(*ev_fd), GFP_KERNEL);
131         if (!ev_fd)
134         ev_fd->cq_ev_fd = eventfd_ctx_fdget(fd);
135         if (IS_ERR(ev_fd->cq_ev_fd)) {
136                 int ret = PTR_ERR(ev_fd->cq_ev_fd);
138                 kfree(ev_fd);
143         ev_fd->last_cq_tail = ctx->cached_cq_tail;
146         ev_fd->eventfd_async = eventfd_async;
148         refcount_set(&ev_fd->refs, 1);
149         atomic_set(&ev_fd->ops, 0);
150         rcu_assign_pointer(ctx->io_ev_fd, ev_fd);
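Lines 118-150 are the registration path: refuse a second registration while one exists (line 124), allocate the struct, resolve the user's fd into an eventfd context with eventfd_ctx_fdget(), seed last_cq_tail with the current tail so no stale notification fires, set the registration's own reference, and finally publish with rcu_assign_pointer() so the lockless readers above observe fully initialized memory. From userspace this is driven by the IORING_REGISTER_EVENTFD opcode; a minimal liburing usage sketch (error handling omitted, assumes liburing is installed and the kernel supports eventfd registration):

#include <liburing.h>
#include <sys/eventfd.h>
#include <inttypes.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        uint64_t count;
        int efd = eventfd(0, 0);

        io_uring_queue_init(8, &ring, 0);
        /* IORING_REGISTER_EVENTFD; the _async variant would set
         * eventfd_async (line 146) instead. */
        io_uring_register_eventfd(&ring, efd);

        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_nop(sqe);
        io_uring_submit(&ring);    /* posting the CQE bumps the eventfd */

        read(efd, &count, sizeof(count));  /* blocks until a CQE lands */
        printf("eventfd fired, count=%" PRIu64 "\n", count);

        io_uring_unregister_eventfd(&ring);
        io_uring_queue_exit(&ring);
        close(efd);
        return 0;
}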
156         struct io_ev_fd *ev_fd;
158         ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
160         if (ev_fd) {
163                 io_eventfd_put(ev_fd);
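Lines 156-163 are the inverse of registration: the pointer is fetched with rcu_dereference_protected() (in the upstream source the condition asserts the ring's uring_lock is held; the match truncates it), ctx->io_ev_fd is cleared, and the registration reference from line 148 is dropped via io_eventfd_put(). Any signaler that already grabbed its own reference through refcount_inc_not_zero() on line 92 finishes normally; the final put then frees through the call_rcu() path at the top of the file.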