/*
 *  linux/fs/fcntl.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/capability.h>
#include <linux/dnotify.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>

#include <asm/poll.h>
#include <asm/siginfo.h>
#include <asm/uaccess.h>

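/*
 * Writers of the close-on-exec bitmap serialize on files->file_lock;
 * lockless readers such as get_close_on_exec() below are safe under
 * rcu_read_lock(), since the fdtable is only freed via RCU after a
 * resize.
 */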
void set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		FD_SET(fd, fdt->close_on_exec);
	else
		FD_CLR(fd, fdt->close_on_exec);
	spin_unlock(&files->file_lock);
}

static int get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	int res;
	rcu_read_lock();
	fdt = files_fdtable(files);
	res = FD_ISSET(fd, fdt->close_on_exec);
	rcu_read_unlock();
	return res;
}

SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
	int err = -EBADF;
	struct file *file, *tofree;
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	if ((flags & ~O_CLOEXEC) != 0)
		return -EINVAL;

	if (unlikely(oldfd == newfd))
		return -EINVAL;

	spin_lock(&files->file_lock);
	err = expand_files(files, newfd);
	file = fcheck(oldfd);
	if (unlikely(!file))
		goto Ebadf;
	if (unlikely(err < 0)) {
		if (err == -EMFILE)
			goto Ebadf;
		goto out_unlock;
	}
	/*
	 * We need to detect attempts to do dup2() over an allocated but
	 * not yet fully opened descriptor.  NB: OpenBSD avoids that at
	 * the price of extra work in their equivalent of fget() - they
	 * insert struct file immediately after grabbing a descriptor,
	 * mark it larval if more work (e.g. actual opening) is needed
	 * and make sure that fget() treats larval files as absent.
	 * Potentially interesting, but while extra work in fget() is
	 * trivial, the locking implications and amount of surgery on
	 * open()-related paths in VFS are not.  FreeBSD fails with
	 * -EBADF in the same situation, NetBSD's "solution" deadlocks
	 * in rather amusing ways, AFAICS.  All of that is out of scope
	 * of POSIX or SUS, since neither considers shared descriptor
	 * tables and this condition does not arise without those.
	 */
	err = -EBUSY;
	fdt = files_fdtable(files);
	tofree = fdt->fd[newfd];
	if (!tofree && FD_ISSET(newfd, fdt->open_fds))
		goto out_unlock;
	get_file(file);
	rcu_assign_pointer(fdt->fd[newfd], file);
	FD_SET(newfd, fdt->open_fds);
	if (flags & O_CLOEXEC)
		FD_SET(newfd, fdt->close_on_exec);
	else
		FD_CLR(newfd, fdt->close_on_exec);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);

	return newfd;

Ebadf:
	err = -EBADF;
out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}
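/*
 * Userspace view, as a minimal sketch (error handling elided): the
 * O_CLOEXEC handling above is what lets dup3() duplicate a descriptor
 * and mark it close-on-exec in one atomic step, avoiding the window
 * that a dup2() followed by fcntl(F_SETFD, FD_CLOEXEC) leaves open
 * across a concurrent fork()/exec():
 *
 *	int newfd = dup3(oldfd, 100, O_CLOEXEC);
 *	if (newfd < 0)
 *		perror("dup3");
 */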

SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
	if (unlikely(newfd == oldfd)) { /* corner case */
		struct files_struct *files = current->files;
		int retval = oldfd;

		rcu_read_lock();
		if (!fcheck_files(files, oldfd))
			retval = -EBADF;
		rcu_read_unlock();
		return retval;
	}
	return sys_dup3(oldfd, newfd, 0);
}

SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
	int ret = -EBADF;
	struct file *file = fget(fildes);

	if (file) {
		ret = get_unused_fd();
		if (ret >= 0)
			fd_install(ret, file);
		else
			fput(file);
	}
	return ret;
}
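/*
 * Note (sketch, not from this file): dup() is equivalent to
 * fcntl(fildes, F_DUPFD, 0) handled in do_fcntl() below, except that
 * F_DUPFD lets the caller pick the lowest acceptable descriptor:
 *
 *	int high = fcntl(fd, F_DUPFD, 10);	(new fd will be >= 10)
 */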

#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)

static int setfl(int fd, struct file *filp, unsigned long arg)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	int error = 0;

	/*
	 * O_APPEND cannot be cleared if the file is marked as append-only
	 * and the file is open for write.
	 */
	if (((arg ^ filp->f_flags) & O_APPEND) && IS_APPEND(inode))
		return -EPERM;

	/* O_NOATIME can only be set by the owner or superuser */
	if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
		if (!is_owner_or_cap(inode))
			return -EPERM;

165 	if (O_NONBLOCK != O_NDELAY)
166 	       if (arg & O_NDELAY)
167 		   arg |= O_NONBLOCK;
168 
169 	if (arg & O_DIRECT) {
170 		if (!filp->f_mapping || !filp->f_mapping->a_ops ||
171 			!filp->f_mapping->a_ops->direct_IO)
172 				return -EINVAL;
173 	}
174 
175 	if (filp->f_op && filp->f_op->check_flags)
176 		error = filp->f_op->check_flags(arg);
177 	if (error)
178 		return error;
179 
180 	/*
181 	 * ->fasync() is responsible for setting the FASYNC bit.
182 	 */
183 	if (((arg ^ filp->f_flags) & FASYNC) && filp->f_op &&
184 			filp->f_op->fasync) {
185 		error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
186 		if (error < 0)
187 			goto out;
188 		if (error > 0)
189 			error = 0;
190 	}
191 	spin_lock(&filp->f_lock);
192 	filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
193 	spin_unlock(&filp->f_lock);
194 
195  out:
196 	return error;
197 }
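/*
 * Userspace view, as a minimal sketch: F_SETFL is the usual way to
 * toggle O_NONBLOCK on an already-open descriptor; only the bits in
 * SETFL_MASK above (plus FASYNC, via ->fasync()) can be changed here:
 *
 *	int fl = fcntl(fd, F_GETFL);
 *	fcntl(fd, F_SETFL, fl | O_NONBLOCK);
 */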

static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
		     int force)
{
	unsigned long flags;

	write_lock_irqsave(&filp->f_owner.lock, flags);
	if (force || !filp->f_owner.pid) {
		put_pid(filp->f_owner.pid);
		filp->f_owner.pid = get_pid(pid);
		filp->f_owner.pid_type = type;

		if (pid) {
			const struct cred *cred = current_cred();
			filp->f_owner.uid = cred->uid;
			filp->f_owner.euid = cred->euid;
		}
	}
	write_unlock_irqrestore(&filp->f_owner.lock, flags);
}

int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
		int force)
{
	int err;

	err = security_file_set_fowner(filp);
	if (err)
		return err;

	f_modown(filp, pid, type, force);
	return 0;
}
EXPORT_SYMBOL(__f_setown);

int f_setown(struct file *filp, unsigned long arg, int force)
{
	enum pid_type type;
	struct pid *pid;
	int who = arg;
	int result;
	type = PIDTYPE_PID;
	if (who < 0) {
		type = PIDTYPE_PGID;
		who = -who;
	}
	rcu_read_lock();
	pid = find_vpid(who);
	result = __f_setown(filp, pid, type, force);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL(f_setown);

void f_delown(struct file *filp)
{
	f_modown(filp, NULL, PIDTYPE_PID, 1);
}

pid_t f_getown(struct file *filp)
{
	pid_t pid;
	read_lock(&filp->f_owner.lock);
	pid = pid_vnr(filp->f_owner.pid);
	if (filp->f_owner.pid_type == PIDTYPE_PGID)
		pid = -pid;
	read_unlock(&filp->f_owner.lock);
	return pid;
}
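/*
 * Userspace view, as a minimal sketch: F_SETOWN encodes "process
 * group" as a negative argument, mirroring the sign convention that
 * f_setown()/f_getown() above implement:
 *
 *	fcntl(fd, F_SETOWN, getpid());	   deliver SIGIO to this process
 *	fcntl(fd, F_SETOWN, -getpgrp());   ... or to its process group
 */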

static int f_setown_ex(struct file *filp, unsigned long arg)
{
	struct f_owner_ex __user *owner_p = (void __user *)arg;
	struct f_owner_ex owner;
	struct pid *pid;
	int type;
	int ret;

	ret = copy_from_user(&owner, owner_p, sizeof(owner));
	if (ret)
		return -EFAULT;

	switch (owner.type) {
	case F_OWNER_TID:
		type = PIDTYPE_MAX;
		break;

	case F_OWNER_PID:
		type = PIDTYPE_PID;
		break;

	case F_OWNER_PGRP:
		type = PIDTYPE_PGID;
		break;

	default:
		return -EINVAL;
	}

	rcu_read_lock();
	pid = find_vpid(owner.pid);
	if (owner.pid && !pid)
		ret = -ESRCH;
	else
		ret = __f_setown(filp, pid, type, 1);
	rcu_read_unlock();

	return ret;
}
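/*
 * Userspace view, as a minimal sketch (tid is an illustrative thread
 * id, not from this file): F_SETOWN_EX makes the owner type explicit,
 * including single threads (F_OWNER_TID), which plain F_SETOWN cannot
 * express:
 *
 *	struct f_owner_ex ex = { .type = F_OWNER_TID, .pid = tid };
 *	fcntl(fd, F_SETOWN_EX, &ex);
 */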

static int f_getown_ex(struct file *filp, unsigned long arg)
{
	struct f_owner_ex __user *owner_p = (void __user *)arg;
	struct f_owner_ex owner;
	int ret = 0;

	read_lock(&filp->f_owner.lock);
	owner.pid = pid_vnr(filp->f_owner.pid);
	switch (filp->f_owner.pid_type) {
	case PIDTYPE_MAX:
		owner.type = F_OWNER_TID;
		break;

	case PIDTYPE_PID:
		owner.type = F_OWNER_PID;
		break;

	case PIDTYPE_PGID:
		owner.type = F_OWNER_PGRP;
		break;

	default:
		WARN_ON(1);
		ret = -EINVAL;
		break;
	}
	read_unlock(&filp->f_owner.lock);

	if (!ret) {
		ret = copy_to_user(owner_p, &owner, sizeof(owner));
		if (ret)
			ret = -EFAULT;
	}
	return ret;
}
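/*
 * Note: because F_GETOWN_EX returns the owner through memory rather
 * than the syscall return value, it also sidesteps the historical
 * F_GETOWN wart flagged in do_fcntl() below, where a process-group
 * owner comes back as a negative value that libc can mistake for an
 * error.
 */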

static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
		struct file *filp)
{
	long err = -EINVAL;

	switch (cmd) {
	case F_DUPFD:
	case F_DUPFD_CLOEXEC:
		if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
			break;
		err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
		if (err >= 0) {
			get_file(filp);
			fd_install(err, filp);
		}
		break;
	case F_GETFD:
		err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
		break;
	case F_SETFD:
		err = 0;
		set_close_on_exec(fd, arg & FD_CLOEXEC);
		break;
	case F_GETFL:
		err = filp->f_flags;
		break;
	case F_SETFL:
		err = setfl(fd, filp, arg);
		break;
	case F_GETLK:
		err = fcntl_getlk(filp, (struct flock __user *) arg);
		break;
	case F_SETLK:
	case F_SETLKW:
		err = fcntl_setlk(fd, filp, cmd, (struct flock __user *) arg);
		break;
	case F_GETOWN:
		/*
		 * XXX If f_owner is a process group, the
		 * negative return value will get converted
		 * into an error.  Oops.  If we keep the
		 * current syscall conventions, the only way
		 * to fix this will be in libc.
		 */
		err = f_getown(filp);
		force_successful_syscall_return();
		break;
	case F_SETOWN:
		err = f_setown(filp, arg, 1);
		break;
	case F_GETOWN_EX:
		err = f_getown_ex(filp, arg);
		break;
	case F_SETOWN_EX:
		err = f_setown_ex(filp, arg);
		break;
	case F_GETSIG:
		err = filp->f_owner.signum;
		break;
	case F_SETSIG:
		/* arg == 0 restores default behaviour. */
		if (!valid_signal(arg)) {
			break;
		}
		err = 0;
		filp->f_owner.signum = arg;
		break;
	case F_GETLEASE:
		err = fcntl_getlease(filp);
		break;
	case F_SETLEASE:
		err = fcntl_setlease(fd, filp, arg);
		break;
	case F_NOTIFY:
		err = fcntl_dirnotify(fd, filp, arg);
		break;
	default:
		break;
	}
	return err;
}

SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
{
	struct file *filp;
	long err = -EBADF;

	filp = fget(fd);
	if (!filp)
		goto out;

	err = security_file_fcntl(filp, cmd, arg);
	if (err) {
		fput(filp);
		return err;
	}

	err = do_fcntl(fd, cmd, arg, filp);

	fput(filp);
out:
	return err;
}

#if BITS_PER_LONG == 32
SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
		unsigned long, arg)
{
	struct file *filp;
	long err;

	err = -EBADF;
	filp = fget(fd);
	if (!filp)
		goto out;

	err = security_file_fcntl(filp, cmd, arg);
	if (err) {
		fput(filp);
		return err;
	}
	err = -EBADF;

	switch (cmd) {
	case F_GETLK64:
		err = fcntl_getlk64(filp, (struct flock64 __user *) arg);
		break;
	case F_SETLK64:
	case F_SETLKW64:
		err = fcntl_setlk64(fd, filp, cmd,
				(struct flock64 __user *) arg);
		break;
	default:
		err = do_fcntl(fd, cmd, arg, filp);
		break;
	}
	fput(filp);
out:
	return err;
}
#endif

/* Table to convert sigio signal codes into poll band bitmaps */

static const long band_table[NSIGPOLL] = {
	POLLIN | POLLRDNORM,			/* POLL_IN */
	POLLOUT | POLLWRNORM | POLLWRBAND,	/* POLL_OUT */
	POLLIN | POLLRDNORM | POLLMSG,		/* POLL_MSG */
	POLLERR,				/* POLL_ERR */
	POLLPRI | POLLRDBAND,			/* POLL_PRI */
	POLLHUP | POLLERR			/* POLL_HUP */
};
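/*
 * band_table is indexed by (reason - POLL_IN) in send_sigio_to_task()
 * below; a reason outside the POLL_IN..POLL_HUP range falls back to
 * si_band = ~0L.
 */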

static inline int sigio_perm(struct task_struct *p,
                             struct fown_struct *fown, int sig)
{
	const struct cred *cred;
	int ret;

	rcu_read_lock();
	cred = __task_cred(p);
	ret = ((fown->euid == 0 ||
		fown->euid == cred->suid || fown->euid == cred->uid ||
		fown->uid  == cred->suid || fown->uid  == cred->uid) &&
	       !security_file_send_sigiotask(p, fown, sig));
	rcu_read_unlock();
	return ret;
}

static void send_sigio_to_task(struct task_struct *p,
			       struct fown_struct *fown,
			       int fd, int reason, int group)
{
	/*
	 * F_SETSIG can change ->signum locklessly in parallel, so make
	 * sure we read it once and use the same value throughout.
	 */
	int signum = ACCESS_ONCE(fown->signum);

	if (!sigio_perm(p, fown, signum))
		return;

	switch (signum) {
		siginfo_t si;
		default:
			/* Queue a rt signal with the appropriate fd as its
			   value.  We use SI_SIGIO as the source, not
			   SI_KERNEL, since kernel signals always get
			   delivered even if we can't queue.  Failure to
			   queue in this case _should_ be reported; we fall
			   back to SIGIO in that case. --sct */
			si.si_signo = signum;
			si.si_errno = 0;
			si.si_code  = reason;
			/* Make sure we are called with one of the POLL_*
			   reasons, otherwise we could leak kernel stack into
			   userspace.  */
			BUG_ON((reason & __SI_MASK) != __SI_POLL);
			if (reason - POLL_IN >= NSIGPOLL)
				si.si_band  = ~0L;
			else
				si.si_band = band_table[reason - POLL_IN];
			si.si_fd    = fd;
			if (!do_send_sig_info(signum, &si, p, group))
				break;
		/* fall-through: fall back on the old plain SIGIO signal */
		case 0:
			do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, group);
	}
}
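/*
 * Userspace view, as a minimal sketch: after F_SETSIG with a realtime
 * signal, an SA_SIGINFO handler sees the si_fd/si_band values filled
 * in above (the handler name is illustrative, not from this file):
 *
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETSIG, SIGRTMIN + 1);
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
 *
 *	void on_io(int sig, siginfo_t *si, void *uc)
 *	{
 *		... si->si_fd says which fd, si->si_band which events ...
 *	}
 */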

void send_sigio(struct fown_struct *fown, int fd, int band)
{
	struct task_struct *p;
	enum pid_type type;
	struct pid *pid;
	int group = 1;

	read_lock(&fown->lock);

	type = fown->pid_type;
	if (type == PIDTYPE_MAX) {
		group = 0;
		type = PIDTYPE_PID;
	}

	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	read_lock(&tasklist_lock);
	do_each_pid_task(pid, type, p) {
		send_sigio_to_task(p, fown, fd, band, group);
	} while_each_pid_task(pid, type, p);
	read_unlock(&tasklist_lock);
 out_unlock_fown:
	read_unlock(&fown->lock);
}

static void send_sigurg_to_task(struct task_struct *p,
				struct fown_struct *fown, int group)
{
	if (sigio_perm(p, fown, SIGURG))
		do_send_sig_info(SIGURG, SEND_SIG_PRIV, p, group);
}

int send_sigurg(struct fown_struct *fown)
{
	struct task_struct *p;
	enum pid_type type;
	struct pid *pid;
	int group = 1;
	int ret = 0;

	read_lock(&fown->lock);

	type = fown->pid_type;
	if (type == PIDTYPE_MAX) {
		group = 0;
		type = PIDTYPE_PID;
	}

	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	ret = 1;

	read_lock(&tasklist_lock);
	do_each_pid_task(pid, type, p) {
		send_sigurg_to_task(p, fown, group);
	} while_each_pid_task(pid, type, p);
	read_unlock(&tasklist_lock);
 out_unlock_fown:
	read_unlock(&fown->lock);
	return ret;
}

static DEFINE_RWLOCK(fasync_lock);
static struct kmem_cache *fasync_cache __read_mostly;

/*
 * Remove a fasync entry. If successfully removed, return
 * positive and clear the FASYNC flag. If no entry exists,
 * do nothing and return 0.
 *
 * NOTE! It is very important that the FASYNC flag always
 * match the state "is the filp on a fasync list".
 *
 * We always take filp->f_lock first, since the inner
 * fasync_lock needs to be irq-safe.
 */
static int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
{
	struct fasync_struct *fa, **fp;
	int result = 0;

	spin_lock(&filp->f_lock);
	write_lock_irq(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file != filp)
			continue;
		*fp = fa->fa_next;
		kmem_cache_free(fasync_cache, fa);
		filp->f_flags &= ~FASYNC;
		result = 1;
		break;
	}
	write_unlock_irq(&fasync_lock);
	spin_unlock(&filp->f_lock);
	return result;
}

/*
 * Add a fasync entry. Return negative on error, positive if
 * added, and zero if it only updated an existing entry.
 *
 * NOTE! It is very important that the FASYNC flag always
 * match the state "is the filp on a fasync list".
 */
static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
{
	struct fasync_struct *new, *fa, **fp;
	int result = 0;

	new = kmem_cache_alloc(fasync_cache, GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	spin_lock(&filp->f_lock);
	write_lock_irq(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file != filp)
			continue;
		fa->fa_fd = fd;
		kmem_cache_free(fasync_cache, new);
		goto out;
	}

	new->magic = FASYNC_MAGIC;
	new->fa_file = filp;
	new->fa_fd = fd;
	new->fa_next = *fapp;
	*fapp = new;
	result = 1;
	filp->f_flags |= FASYNC;

out:
	write_unlock_irq(&fasync_lock);
	spin_unlock(&filp->f_lock);
	return result;
}

/*
 * fasync_helper() is used by almost all character device drivers
 * to set up the fasync queue, and for regular files by the file
 * lease code. It returns negative on error, 0 if it made no
 * changes and positive if it added/deleted the entry.
 */
int fasync_helper(int fd, struct file *filp, int on, struct fasync_struct **fapp)
{
	if (!on)
		return fasync_remove_entry(filp, fapp);
	return fasync_add_entry(fd, filp, fapp);
}

EXPORT_SYMBOL(fasync_helper);
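
/*
 * Driver-side usage, as a minimal sketch (my_fasync/my_fasync_list
 * are illustrative names, not from this file): a character driver
 * wires its ->fasync() file operation to fasync_helper() and then
 * notifies subscribers from its data-ready path with kill_fasync():
 *
 *	static struct fasync_struct *my_fasync_list;
 *
 *	static int my_fasync(int fd, struct file *filp, int on)
 *	{
 *		return fasync_helper(fd, filp, on, &my_fasync_list);
 *	}
 *
 *	... and when new data arrives:
 *	kill_fasync(&my_fasync_list, SIGIO, POLL_IN);
 */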

void __kill_fasync(struct fasync_struct *fa, int sig, int band)
{
	while (fa) {
		struct fown_struct *fown;
		if (fa->magic != FASYNC_MAGIC) {
			printk(KERN_ERR "kill_fasync: bad magic number in "
			       "fasync_struct!\n");
			return;
		}
		fown = &fa->fa_file->f_owner;
		/* Don't send SIGURG to processes which have not set a
		   queued signum: SIGURG has its own default signalling
		   mechanism. */
		if (!(sig == SIGURG && fown->signum == 0))
			send_sigio(fown, fa->fa_fd, band);
		fa = fa->fa_next;
	}
}

EXPORT_SYMBOL(__kill_fasync);

void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
	/* First a quick test without locking: usually
	 * the list is empty.
	 */
	if (*fp) {
		read_lock(&fasync_lock);
		/* reread *fp after obtaining the lock */
		__kill_fasync(*fp, sig, band);
		read_unlock(&fasync_lock);
	}
}
EXPORT_SYMBOL(kill_fasync);

static int __init fasync_init(void)
{
	fasync_cache = kmem_cache_create("fasync_cache",
		sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL);
	return 0;
}

module_init(fasync_init)