/*
 *  fs/eventfd.c
 *
 *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
 *
 */

#include <linux/file.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/module.h>
#include <linux/kref.h>
#include <linux/eventfd.h>

struct eventfd_ctx {
	struct kref kref;
	wait_queue_head_t wqh;
	/*
	 * Every time that a write(2) is performed on an eventfd, the
	 * value of the __u64 being written is added to "count" and a
	 * wakeup is performed on "wqh". A read(2) will return the "count"
	 * value to userspace, and will reset "count" to zero. The kernel
	 * side eventfd_signal() also adds to the "count" counter and
	 * issues a wakeup.
	 */
	__u64 count;
	unsigned int flags;
};

/**
 * eventfd_signal - Adds @n to the eventfd counter.
 * @ctx: [in] Pointer to the eventfd context.
 * @n: [in] Value of the counter to be added to the eventfd internal counter.
 *          The value cannot be negative.
 *
 * This function is supposed to be called by the kernel in paths that do not
 * allow sleeping. In this function we allow the counter to reach the
 * ULLONG_MAX value, and we signal this as an overflow condition by returning
 * a POLLERR to poll(2).
 *
 * Returns @n in case of success, a non-negative number lower than @n in case
 * of overflow, or the following error code:
 *
 * -EINVAL    : The value of @n is negative.
 */
int eventfd_signal(struct eventfd_ctx *ctx, int n)
{
	unsigned long flags;

	if (n < 0)
		return -EINVAL;
	spin_lock_irqsave(&ctx->wqh.lock, flags);
	if (ULLONG_MAX - ctx->count < n)
		n = (int) (ULLONG_MAX - ctx->count);
	ctx->count += n;
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, POLLIN);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return n;
}
EXPORT_SYMBOL_GPL(eventfd_signal);
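/*
 * A minimal usage sketch for eventfd_signal() from a hypothetical driver
 * completion path; "my_dev" and its "notify_ctx" field are illustrative
 * names, not part of this file:
 *
 *	static void my_dev_complete(struct my_dev *dev)
 *	{
 *		if (dev->notify_ctx)
 *			eventfd_signal(dev->notify_ctx, 1);
 *	}
 *
 * Since eventfd_signal() never sleeps, it may be called from contexts that
 * cannot block, such as interrupt handlers; userspace then observes the
 * event via read(2) or poll(2).
 */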
static void eventfd_free_ctx(struct eventfd_ctx *ctx)
{
	kfree(ctx);
}

static void eventfd_free(struct kref *kref)
{
	struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref);

	eventfd_free_ctx(ctx);
}

/**
 * eventfd_ctx_get - Acquires a reference to the internal eventfd context.
 * @ctx: [in] Pointer to the eventfd context.
 *
 * Returns a pointer to the eventfd context.
 */
struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx)
{
	kref_get(&ctx->kref);
	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_get);

/**
 * eventfd_ctx_put - Releases a reference to the internal eventfd context.
 * @ctx: [in] Pointer to eventfd context.
 *
 * The eventfd context reference must have been previously acquired either
 * with eventfd_ctx_get() or eventfd_ctx_fdget().
 */
void eventfd_ctx_put(struct eventfd_ctx *ctx)
{
	kref_put(&ctx->kref, eventfd_free);
}
EXPORT_SYMBOL_GPL(eventfd_ctx_put);

static int eventfd_release(struct inode *inode, struct file *file)
{
	struct eventfd_ctx *ctx = file->private_data;

	wake_up_poll(&ctx->wqh, POLLHUP);
	eventfd_ctx_put(ctx);
	return 0;
}

static unsigned int eventfd_poll(struct file *file, poll_table *wait)
{
	struct eventfd_ctx *ctx = file->private_data;
	unsigned int events = 0;
	unsigned long flags;

	poll_wait(file, &ctx->wqh, wait);

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	if (ctx->count > 0)
		events |= POLLIN;
	if (ctx->count == ULLONG_MAX)
		events |= POLLERR;
	if (ULLONG_MAX - 1 > ctx->count)
		events |= POLLOUT;
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return events;
}

static void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
{
	*cnt = (ctx->flags & EFD_SEMAPHORE) ? 1 : ctx->count;
	ctx->count -= *cnt;
}

/**
 * eventfd_ctx_remove_wait_queue - Reads the current counter and removes the wait queue.
 * @ctx: [in] Pointer to eventfd context.
 * @wait: [in] Wait queue to be removed.
 * @cnt: [out] Pointer to the 64-bit counter value.
 *
 * Returns zero if successful, or the following error code:
 *
 * -EAGAIN    : The operation would have blocked.
 *
 * This is used to atomically remove a wait queue entry from the eventfd wait
 * queue head, and read/reset the counter value.
 */
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_t *wait,
				  __u64 *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	eventfd_ctx_do_read(ctx, cnt);
	__remove_wait_queue(&ctx->wqh, wait);
	if (*cnt != 0 && waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, POLLOUT);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return *cnt != 0 ? 0 : -EAGAIN;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue);

/**
 * eventfd_ctx_read - Reads the eventfd counter or waits if it is zero.
 * @ctx: [in] Pointer to eventfd context.
 * @no_wait: [in] Non-zero if the operation should not block.
 * @cnt: [out] Pointer to the 64-bit counter value.
 *
 * Returns zero if successful, or the following error codes:
 *
 * -EAGAIN      : The operation would have blocked but @no_wait was non-zero.
 * -ERESTARTSYS : A signal interrupted the wait operation.
 *
 * If @no_wait is zero, the function might sleep until the eventfd internal
 * counter becomes greater than zero.
 */
ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt)
{
	ssize_t res;
	DECLARE_WAITQUEUE(wait, current);

	spin_lock_irq(&ctx->wqh.lock);
	*cnt = 0;
	res = -EAGAIN;
	if (ctx->count > 0)
		res = 0;
	else if (!no_wait) {
		__add_wait_queue(&ctx->wqh, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ctx->count > 0) {
				res = 0;
				break;
			}
			if (signal_pending(current)) {
				res = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	if (likely(res == 0)) {
		eventfd_ctx_do_read(ctx, cnt);
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked_poll(&ctx->wqh, POLLOUT);
	}
	spin_unlock_irq(&ctx->wqh.lock);

	return res;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_read);
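/*
 * A minimal sketch of kernel-side consumption via eventfd_ctx_read(); the
 * context pointer "ctx" is assumed to come from eventfd_ctx_fdget() and
 * the surrounding function is hypothetical:
 *
 *	__u64 cnt;
 *	ssize_t res;
 *
 *	res = eventfd_ctx_read(ctx, 0, &cnt);	// blocks: no_wait == 0
 *	if (res < 0)
 *		return res;			// e.g. -ERESTARTSYS
 *
 * On success, "cnt" holds the value read: the whole counter (which is then
 * reset to zero), or one per call in EFD_SEMAPHORE mode.
 */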
static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct eventfd_ctx *ctx = file->private_data;
	ssize_t res;
	__u64 cnt;

	if (count < sizeof(cnt))
		return -EINVAL;
	res = eventfd_ctx_read(ctx, file->f_flags & O_NONBLOCK, &cnt);
	if (res < 0)
		return res;

	return put_user(cnt, (__u64 __user *) buf) ? -EFAULT : sizeof(cnt);
}

static ssize_t eventfd_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	struct eventfd_ctx *ctx = file->private_data;
	ssize_t res;
	__u64 ucnt;
	DECLARE_WAITQUEUE(wait, current);

	if (count < sizeof(ucnt))
		return -EINVAL;
	if (copy_from_user(&ucnt, buf, sizeof(ucnt)))
		return -EFAULT;
	if (ucnt == ULLONG_MAX)
		return -EINVAL;
	spin_lock_irq(&ctx->wqh.lock);
	res = -EAGAIN;
	if (ULLONG_MAX - ctx->count > ucnt)
		res = sizeof(ucnt);
	else if (!(file->f_flags & O_NONBLOCK)) {
		__add_wait_queue(&ctx->wqh, &wait);
		for (res = 0;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ULLONG_MAX - ctx->count > ucnt) {
				res = sizeof(ucnt);
				break;
			}
			if (signal_pending(current)) {
				res = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	if (likely(res > 0)) {
		ctx->count += ucnt;
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked_poll(&ctx->wqh, POLLIN);
	}
	spin_unlock_irq(&ctx->wqh.lock);

	return res;
}

static const struct file_operations eventfd_fops = {
	.release	= eventfd_release,
	.poll		= eventfd_poll,
	.read		= eventfd_read,
	.write		= eventfd_write,
};

/**
 * eventfd_fget - Acquires a reference to an eventfd file descriptor.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the eventfd file structure in case of success, or the
 * following error pointers:
 *
 * -EBADF     : Invalid @fd file descriptor.
 * -EINVAL    : The @fd file descriptor is not an eventfd file.
 */
struct file *eventfd_fget(int fd)
{
	struct file *file;

	file = fget(fd);
	if (!file)
		return ERR_PTR(-EBADF);
	if (file->f_op != &eventfd_fops) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file;
}
EXPORT_SYMBOL_GPL(eventfd_fget);
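/*
 * A minimal sketch of the typical lifetime pattern built on eventfd_fget();
 * the surrounding ioctl-style handler is hypothetical:
 *
 *	struct file *file;
 *
 *	file = eventfd_fget(fd);	// fd supplied by userspace
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	...				// use the eventfd file
 *	fput(file);			// drop the file reference
 *
 * Callers that only need the context, and not the file, can instead use
 * eventfd_ctx_fdget() below, pairing it with eventfd_ctx_put().
 */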
/**
 * eventfd_ctx_fdget - Acquires a reference to the internal eventfd context.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointers returned by the following functions:
 *
 * eventfd_fget
 */
struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
	struct file *file;
	struct eventfd_ctx *ctx;

	file = eventfd_fget(fd);
	if (IS_ERR(file))
		return (struct eventfd_ctx *) file;
	ctx = eventfd_ctx_get(file->private_data);
	fput(file);

	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fdget);

/**
 * eventfd_ctx_fileget - Acquires a reference to the internal eventfd context.
 * @file: [in] Eventfd file pointer.
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointer:
 *
 * -EINVAL    : The @file pointer is not an eventfd file.
 */
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file)
{
	if (file->f_op != &eventfd_fops)
		return ERR_PTR(-EINVAL);

	return eventfd_ctx_get(file->private_data);
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);

/**
 * eventfd_file_create - Creates an eventfd file pointer.
 * @count: Initial eventfd counter value.
 * @flags: Flags for the eventfd file.
 *
 * This function creates an eventfd file pointer, without installing it into
 * the fd table. This is useful when the eventfd file is used during the
 * initialization of data structures that require extra setup after the
 * eventfd creation. So the eventfd creation is split into the file pointer
 * creation phase, and the file descriptor installation phase. In this way
 * races with userspace closing the newly installed file descriptor can be
 * avoided.
 * Returns an eventfd file pointer, or a proper error pointer.
 */
struct file *eventfd_file_create(unsigned int count, int flags)
{
	struct file *file;
	struct eventfd_ctx *ctx;

	/* Check the EFD_* constants for consistency.  */
	BUILD_BUG_ON(EFD_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(EFD_NONBLOCK != O_NONBLOCK);

	if (flags & ~EFD_FLAGS_SET)
		return ERR_PTR(-EINVAL);

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->kref);
	init_waitqueue_head(&ctx->wqh);
	ctx->count = count;
	ctx->flags = flags;

	file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx,
				  O_RDWR | (flags & EFD_SHARED_FCNTL_FLAGS));
	if (IS_ERR(file))
		eventfd_free_ctx(ctx);

	return file;
}

SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
{
	int fd, error;
	struct file *file;

	error = get_unused_fd_flags(flags & EFD_SHARED_FCNTL_FLAGS);
	if (error < 0)
		return error;
	fd = error;

	file = eventfd_file_create(count, flags);
	if (IS_ERR(file)) {
		error = PTR_ERR(file);
		goto err_put_unused_fd;
	}
	fd_install(fd, file);

	return fd;

err_put_unused_fd:
	put_unused_fd(fd);

	return error;
}

SYSCALL_DEFINE1(eventfd, unsigned int, count)
{
	return sys_eventfd2(count, 0);
}
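/*
 * For reference, a minimal userspace counterpart of the semantics
 * implemented above; this is an illustrative sketch, not part of the
 * kernel build:
 *
 *	int efd = eventfd(0, 0);		// initial counter is 0
 *	uint64_t v = 3;
 *
 *	write(efd, &v, sizeof(v));		// count += 3, wakes readers
 *	read(efd, &v, sizeof(v));		// v == 3, count reset to 0
 *						// (1 per read with EFD_SEMAPHORE)
 *
 * A subsequent blocking read(2) would sleep until another write(2) or a
 * kernel-side eventfd_signal() makes the counter non-zero again.
 */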