xref: /linux/fs/kernfs/file.c (revision 00a6d7b6762c27d441e9ac8faff36384bc0fc180)
1 /*
2  * fs/kernfs/file.c - kernfs file implementation
3  *
4  * Copyright (c) 2001-3 Patrick Mochel
5  * Copyright (c) 2007 SUSE Linux Products GmbH
6  * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
7  *
8  * This file is released under the GPLv2.
9  */
10 
11 #include <linux/fs.h>
12 #include <linux/seq_file.h>
13 #include <linux/slab.h>
14 #include <linux/poll.h>
15 #include <linux/pagemap.h>
16 #include <linux/sched.h>
17 
18 #include "kernfs-internal.h"
19 
20 /*
21  * There's one kernfs_open_file for each open file and one kernfs_open_node
22  * for each kernfs_node with one or more open files.
23  *
24  * kernfs_node->attr.open points to kernfs_open_node.  attr.open is
25  * protected by kernfs_open_node_lock.
26  *
27  * filp->private_data points to seq_file whose ->private points to
28  * kernfs_open_file.  kernfs_open_files are chained at
29  * kernfs_open_node->files, which is protected by kernfs_open_file_mutex.
30  */
31 static DEFINE_SPINLOCK(kernfs_open_node_lock);
32 static DEFINE_MUTEX(kernfs_open_file_mutex);
33 
34 struct kernfs_open_node {
35 	atomic_t		refcnt;
36 	atomic_t		event;
37 	wait_queue_head_t	poll;
38 	struct list_head	files; /* goes through kernfs_open_file.list */
39 };
40 
41 static struct kernfs_open_file *kernfs_of(struct file *file)
42 {
43 	return ((struct seq_file *)file->private_data)->private;
44 }
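
/*
 * Putting the pieces above together, one open file looks roughly like:
 *
 *	file->private_data	-> seq_file, whose ->private
 *				-> kernfs_open_file, whose ->kn
 *				-> kernfs_node, whose ->attr.open
 *				-> kernfs_open_node, whose ->files chains
 *				   every kernfs_open_file of that node
 *
 * kernfs_of() above follows the first two hops to recover the
 * kernfs_open_file from a struct file.
 */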
45 
46 /*
47  * Determine the kernfs_ops for the given kernfs_node.  This function must
48  * be called while holding an active reference.
49  */
50 static const struct kernfs_ops *kernfs_ops(struct kernfs_node *kn)
51 {
52 	if (kn->flags & KERNFS_LOCKDEP)
53 		lockdep_assert_held(kn);
54 	return kn->attr.ops;
55 }
56 
57 /*
58  * As kernfs_seq_stop() is also called after kernfs_seq_start() or
59  * kernfs_seq_next() failure, it needs to distinguish whether it's stopping
60  * a seq_file iteration which is fully initialized with an active reference
61  * or an aborted kernfs_seq_start() due to get_active failure.  The
62  * position pointer is the only context for each seq_file iteration and
63  * thus the stop condition should be encoded in it.  As the return value is
64  * directly visible to userland, ERR_PTR(-ENODEV) is the only acceptable
65  * choice to indicate get_active failure.
66  *
67  * Unfortunately, this is complicated due to the optional custom seq_file
68  * operations which may return ERR_PTR(-ENODEV) too.  kernfs_seq_stop()
69  * can't distinguish whether ERR_PTR(-ENODEV) is from get_active failure or
70  * custom seq_file operations and thus can't decide whether put_active
71  * should be performed or not only on ERR_PTR(-ENODEV).
72  *
73  * This is worked around by factoring out the custom seq_stop() and
74  * put_active part into kernfs_seq_stop_active(), skipping it from
75  * kernfs_seq_stop() when the value is ERR_PTR(-ENODEV), and invoking it directly
76  * when a custom seq_file operation fails with ERR_PTR(-ENODEV) - this ensures
77  * that kernfs_seq_stop_active() is skipped only after a get_active failure.
78  */
79 static void kernfs_seq_stop_active(struct seq_file *sf, void *v)
80 {
81 	struct kernfs_open_file *of = sf->private;
82 	const struct kernfs_ops *ops = kernfs_ops(of->kn);
83 
84 	if (ops->seq_stop)
85 		ops->seq_stop(sf, v);
86 	kernfs_put_active(of->kn);
87 }
88 
89 static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
90 {
91 	struct kernfs_open_file *of = sf->private;
92 	const struct kernfs_ops *ops;
93 
94 	/*
95 	 * @of->mutex nests outside active ref and is just to ensure that
96 	 * the ops aren't called concurrently for the same open file.
97 	 */
98 	mutex_lock(&of->mutex);
99 	if (!kernfs_get_active(of->kn))
100 		return ERR_PTR(-ENODEV);
101 
102 	ops = kernfs_ops(of->kn);
103 	if (ops->seq_start) {
104 		void *next = ops->seq_start(sf, ppos);
105 		/* see the comment above kernfs_seq_stop_active() */
106 		if (next == ERR_PTR(-ENODEV))
107 			kernfs_seq_stop_active(sf, next);
108 		return next;
109 	} else {
110 		/*
111 		 * The same behavior and code as single_open().  Returns
112 		 * !NULL if pos is at the beginning; otherwise, NULL.
113 		 */
114 		return NULL + !*ppos;
115 	}
116 }
117 
118 static void *kernfs_seq_next(struct seq_file *sf, void *v, loff_t *ppos)
119 {
120 	struct kernfs_open_file *of = sf->private;
121 	const struct kernfs_ops *ops = kernfs_ops(of->kn);
122 
123 	if (ops->seq_next) {
124 		void *next = ops->seq_next(sf, v, ppos);
125 		/* see the comment above kernfs_seq_stop_active() */
126 		if (next == ERR_PTR(-ENODEV))
127 			kernfs_seq_stop_active(sf, next);
128 		return next;
129 	} else {
130 		/*
131 		 * The same behavior and code as single_open(), always
132 		 * terminate after the initial read.
133 		 */
134 		++*ppos;
135 		return NULL;
136 	}
137 }
138 
139 static void kernfs_seq_stop(struct seq_file *sf, void *v)
140 {
141 	struct kernfs_open_file *of = sf->private;
142 
143 	if (v != ERR_PTR(-ENODEV))
144 		kernfs_seq_stop_active(sf, v);
145 	mutex_unlock(&of->mutex);
146 }
147 
148 static int kernfs_seq_show(struct seq_file *sf, void *v)
149 {
150 	struct kernfs_open_file *of = sf->private;
151 
152 	of->event = atomic_read(&of->kn->attr.open->event);
153 
154 	return of->kn->attr.ops->seq_show(sf, v);
155 }
156 
157 static const struct seq_operations kernfs_seq_ops = {
158 	.start = kernfs_seq_start,
159 	.next = kernfs_seq_next,
160 	.stop = kernfs_seq_stop,
161 	.show = kernfs_seq_show,
162 };
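
/*
 * Example: a provider that wants multi-record output supplies its own
 * seq_file iteration through kernfs_ops rather than relying on the
 * single_open()-style default above.  A minimal sketch with hypothetical
 * names, iterating over a fixed table of strings; the wrappers above
 * handle the active-ref and ERR_PTR(-ENODEV) details, so the callbacks
 * only deal with their own data:
 *
 *	static const char *example_lines[] = { "foo", "bar", "baz" };
 *
 *	static void *example_seq_start(struct seq_file *sf, loff_t *ppos)
 *	{
 *		if (*ppos >= ARRAY_SIZE(example_lines))
 *			return NULL;
 *		return (void *)&example_lines[*ppos];
 *	}
 *
 *	static void *example_seq_next(struct seq_file *sf, void *v,
 *				      loff_t *ppos)
 *	{
 *		++*ppos;
 *		return example_seq_start(sf, ppos);
 *	}
 *
 *	static void example_seq_stop(struct seq_file *sf, void *v)
 *	{
 *	}
 *
 *	static int example_seq_show(struct seq_file *sf, void *v)
 *	{
 *		seq_printf(sf, "%s\n", *(const char **)v);
 *		return 0;
 *	}
 *
 *	static const struct kernfs_ops example_seq_ops = {
 *		.seq_start	= example_seq_start,
 *		.seq_next	= example_seq_next,
 *		.seq_stop	= example_seq_stop,
 *		.seq_show	= example_seq_show,
 *	};
 */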
163 
164 /*
165  * As reading a bin file can have side-effects, the exact offset and bytes
166  * specified in the read(2) call should be passed to the read callback, making
167  * it difficult to use seq_file.  Implement simplistic custom buffering for
168  * bin files.
169  */
170 static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
171 				       char __user *user_buf, size_t count,
172 				       loff_t *ppos)
173 {
174 	ssize_t len = min_t(size_t, count, PAGE_SIZE);
175 	const struct kernfs_ops *ops;
176 	char *buf;
177 
178 	buf = kmalloc(len, GFP_KERNEL);
179 	if (!buf)
180 		return -ENOMEM;
181 
182 	/*
183 	 * @of->mutex nests outside active ref and is just to ensure that
184 	 * the ops aren't called concurrently for the same open file.
185 	 */
186 	mutex_lock(&of->mutex);
187 	if (!kernfs_get_active(of->kn)) {
188 		len = -ENODEV;
189 		mutex_unlock(&of->mutex);
190 		goto out_free;
191 	}
192 
193 	ops = kernfs_ops(of->kn);
194 	if (ops->read)
195 		len = ops->read(of, buf, len, *ppos);
196 	else
197 		len = -EINVAL;
198 
199 	kernfs_put_active(of->kn);
200 	mutex_unlock(&of->mutex);
201 
202 	if (len < 0)
203 		goto out_free;
204 
205 	if (copy_to_user(user_buf, buf, len)) {
206 		len = -EFAULT;
207 		goto out_free;
208 	}
209 
210 	*ppos += len;
211 
212  out_free:
213 	kfree(buf);
214 	return len;
215 }
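
/*
 * Example: a provider ->read callback for such a node.  The offset and
 * length from read(2) arrive unchanged and the result goes into the
 * kernel buffer allocated above (at most one page).  A minimal sketch
 * with a hypothetical backing blob hung off ->priv:
 *
 *	struct example_blob {
 *		const char	*data;
 *		size_t		size;
 *	};
 *
 *	static ssize_t example_read(struct kernfs_open_file *of, char *buf,
 *				    size_t bytes, loff_t off)
 *	{
 *		struct example_blob *blob = of->kn->priv;
 *
 *		if (off >= blob->size)
 *			return 0;
 *		bytes = min_t(size_t, bytes, blob->size - off);
 *		memcpy(buf, blob->data + off, bytes);
 *		return bytes;
 *	}
 */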
216 
217 /**
218  * kernfs_fop_read - kernfs vfs read callback
219  * @file: file pointer
220  * @user_buf: buffer to read the data into
221  * @count: number of bytes
222  * @ppos: starting offset
223  */
224 static ssize_t kernfs_fop_read(struct file *file, char __user *user_buf,
225 			       size_t count, loff_t *ppos)
226 {
227 	struct kernfs_open_file *of = kernfs_of(file);
228 
229 	if (of->kn->flags & KERNFS_HAS_SEQ_SHOW)
230 		return seq_read(file, user_buf, count, ppos);
231 	else
232 		return kernfs_file_direct_read(of, user_buf, count, ppos);
233 }
234 
235 /**
236  * kernfs_fop_write - kernfs vfs write callback
237  * @file: file pointer
238  * @user_buf: data to write
239  * @count: number of bytes
240  * @ppos: starting offset
241  *
242  * Copy data in from userland and pass it to the matching kernfs write
243  * operation.
244  *
245  * There is no easy way for us to know if userspace is only doing partial
246  * writes, so we don't support them. We expect the entire buffer to come on
247  * the first write.  Hint: if you're writing a value, first read the file,
248  * modify only the value you're changing, then write the entire buffer
249  * back.
250  */
251 static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
252 				size_t count, loff_t *ppos)
253 {
254 	struct kernfs_open_file *of = kernfs_of(file);
255 	const struct kernfs_ops *ops;
256 	size_t len;
257 	ssize_t len;
258 
259 	if (of->atomic_write_len) {
260 		len = count;
261 		if (len > of->atomic_write_len)
262 			return -E2BIG;
263 	} else {
264 		len = min_t(size_t, count, PAGE_SIZE);
265 	}
266 
267 	buf = kmalloc(len + 1, GFP_KERNEL);
268 	if (!buf)
269 		return -ENOMEM;
270 
271 	if (copy_from_user(buf, user_buf, len)) {
272 		len = -EFAULT;
273 		goto out_free;
274 	}
275 	buf[len] = '\0';	/* guarantee string termination */
276 
277 	/*
278 	 * @of->mutex nests outside active ref and is just to ensure that
279 	 * the ops aren't called concurrently for the same open file.
280 	 */
281 	mutex_lock(&of->mutex);
282 	if (!kernfs_get_active(of->kn)) {
283 		mutex_unlock(&of->mutex);
284 		len = -ENODEV;
285 		goto out_free;
286 	}
287 
288 	ops = kernfs_ops(of->kn);
289 	if (ops->write)
290 		len = ops->write(of, buf, len, *ppos);
291 	else
292 		len = -EINVAL;
293 
294 	kernfs_put_active(of->kn);
295 	mutex_unlock(&of->mutex);
296 
297 	if (len > 0)
298 		*ppos += len;
299 out_free:
300 	kfree(buf);
301 	return len;
302 }
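
/*
 * Example: a provider ->write callback.  The whole user buffer has
 * already been copied in and NUL terminated above, so it can be parsed
 * directly.  A minimal sketch, with hypothetical names, that stores an
 * unsigned value; returning @bytes reports the whole write as consumed
 * and *ppos is advanced accordingly:
 *
 *	static ssize_t example_write(struct kernfs_open_file *of, char *buf,
 *				     size_t bytes, loff_t off)
 *	{
 *		struct example_state *st = of->kn->priv;
 *		unsigned long val;
 *		int ret;
 *
 *		ret = kstrtoul(buf, 0, &val);
 *		if (ret)
 *			return ret;
 *		st->value = val;
 *		return bytes;
 *	}
 */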
303 
304 static void kernfs_vma_open(struct vm_area_struct *vma)
305 {
306 	struct file *file = vma->vm_file;
307 	struct kernfs_open_file *of = kernfs_of(file);
308 
309 	if (!of->vm_ops)
310 		return;
311 
312 	if (!kernfs_get_active(of->kn))
313 		return;
314 
315 	if (of->vm_ops->open)
316 		of->vm_ops->open(vma);
317 
318 	kernfs_put_active(of->kn);
319 }
320 
321 static int kernfs_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
322 {
323 	struct file *file = vma->vm_file;
324 	struct kernfs_open_file *of = kernfs_of(file);
325 	int ret;
326 
327 	if (!of->vm_ops)
328 		return VM_FAULT_SIGBUS;
329 
330 	if (!kernfs_get_active(of->kn))
331 		return VM_FAULT_SIGBUS;
332 
333 	ret = VM_FAULT_SIGBUS;
334 	if (of->vm_ops->fault)
335 		ret = of->vm_ops->fault(vma, vmf);
336 
337 	kernfs_put_active(of->kn);
338 	return ret;
339 }
340 
341 static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
342 				   struct vm_fault *vmf)
343 {
344 	struct file *file = vma->vm_file;
345 	struct kernfs_open_file *of = kernfs_of(file);
346 	int ret;
347 
348 	if (!of->vm_ops)
349 		return VM_FAULT_SIGBUS;
350 
351 	if (!kernfs_get_active(of->kn))
352 		return VM_FAULT_SIGBUS;
353 
354 	ret = 0;
355 	if (of->vm_ops->page_mkwrite)
356 		ret = of->vm_ops->page_mkwrite(vma, vmf);
357 	else
358 		file_update_time(file);
359 
360 	kernfs_put_active(of->kn);
361 	return ret;
362 }
363 
364 static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
365 			     void *buf, int len, int write)
366 {
367 	struct file *file = vma->vm_file;
368 	struct kernfs_open_file *of = kernfs_of(file);
369 	int ret;
370 
371 	if (!of->vm_ops)
372 		return -EINVAL;
373 
374 	if (!kernfs_get_active(of->kn))
375 		return -EINVAL;
376 
377 	ret = -EINVAL;
378 	if (of->vm_ops->access)
379 		ret = of->vm_ops->access(vma, addr, buf, len, write);
380 
381 	kernfs_put_active(of->kn);
382 	return ret;
383 }
384 
385 #ifdef CONFIG_NUMA
386 static int kernfs_vma_set_policy(struct vm_area_struct *vma,
387 				 struct mempolicy *new)
388 {
389 	struct file *file = vma->vm_file;
390 	struct kernfs_open_file *of = kernfs_of(file);
391 	int ret;
392 
393 	if (!of->vm_ops)
394 		return 0;
395 
396 	if (!kernfs_get_active(of->kn))
397 		return -EINVAL;
398 
399 	ret = 0;
400 	if (of->vm_ops->set_policy)
401 		ret = of->vm_ops->set_policy(vma, new);
402 
403 	kernfs_put_active(of->kn);
404 	return ret;
405 }
406 
407 static struct mempolicy *kernfs_vma_get_policy(struct vm_area_struct *vma,
408 					       unsigned long addr)
409 {
410 	struct file *file = vma->vm_file;
411 	struct kernfs_open_file *of = kernfs_of(file);
412 	struct mempolicy *pol;
413 
414 	if (!of->vm_ops)
415 		return vma->vm_policy;
416 
417 	if (!kernfs_get_active(of->kn))
418 		return vma->vm_policy;
419 
420 	pol = vma->vm_policy;
421 	if (of->vm_ops->get_policy)
422 		pol = of->vm_ops->get_policy(vma, addr);
423 
424 	kernfs_put_active(of->kn);
425 	return pol;
426 }
427 
428 static int kernfs_vma_migrate(struct vm_area_struct *vma,
429 			      const nodemask_t *from, const nodemask_t *to,
430 			      unsigned long flags)
431 {
432 	struct file *file = vma->vm_file;
433 	struct kernfs_open_file *of = kernfs_of(file);
434 	int ret;
435 
436 	if (!of->vm_ops)
437 		return 0;
438 
439 	if (!kernfs_get_active(of->kn))
440 		return 0;
441 
442 	ret = 0;
443 	if (of->vm_ops->migrate)
444 		ret = of->vm_ops->migrate(vma, from, to, flags);
445 
446 	kernfs_put_active(of->kn);
447 	return ret;
448 }
449 #endif
450 
451 static const struct vm_operations_struct kernfs_vm_ops = {
452 	.open		= kernfs_vma_open,
453 	.fault		= kernfs_vma_fault,
454 	.page_mkwrite	= kernfs_vma_page_mkwrite,
455 	.access		= kernfs_vma_access,
456 #ifdef CONFIG_NUMA
457 	.set_policy	= kernfs_vma_set_policy,
458 	.get_policy	= kernfs_vma_get_policy,
459 	.migrate	= kernfs_vma_migrate,
460 #endif
461 };
462 
463 static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
464 {
465 	struct kernfs_open_file *of = kernfs_of(file);
466 	const struct kernfs_ops *ops;
467 	int rc;
468 
469 	/*
470 	 * mmap path and of->mutex are prone to triggering spurious lockdep
471 	 * warnings and we don't want to add spurious locking dependency
472 	 * between the two.  Check whether mmap is actually implemented
473 	 * without grabbing @of->mutex by testing HAS_MMAP flag.  See the
474 	 * comment in kernfs_file_open() for more details.
475 	 */
476 	if (!(of->kn->flags & KERNFS_HAS_MMAP))
477 		return -ENODEV;
478 
479 	mutex_lock(&of->mutex);
480 
481 	rc = -ENODEV;
482 	if (!kernfs_get_active(of->kn))
483 		goto out_unlock;
484 
485 	ops = kernfs_ops(of->kn);
486 	rc = ops->mmap(of, vma);
487 	if (rc)
488 		goto out_put;
489 
490 	/*
491 	 * PowerPC's pci_mmap of legacy_mem uses shmem_zero_setup()
492 	 * to satisfy versions of X which crash if the mmap fails: that
493 	 * substitutes a new vm_file, and we don't then want bin_vm_ops.
494 	 */
495 	if (vma->vm_file != file)
496 		goto out_put;
497 
498 	rc = -EINVAL;
499 	if (of->mmapped && of->vm_ops != vma->vm_ops)
500 		goto out_put;
501 
502 	/*
503 	 * It is not possible to successfully wrap close.
504 	 * So error out if someone is trying to use close.
505 	 */
506 	rc = -EINVAL;
507 	if (vma->vm_ops && vma->vm_ops->close)
508 		goto out_put;
509 
510 	rc = 0;
511 	of->mmapped = 1;
512 	of->vm_ops = vma->vm_ops;
513 	vma->vm_ops = &kernfs_vm_ops;
514 out_put:
515 	kernfs_put_active(of->kn);
516 out_unlock:
517 	mutex_unlock(&of->mutex);
518 
519 	return rc;
520 }
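
/*
 * Example: a provider ->mmap sets the vma up as it would for any file;
 * whatever vm_ops it installs are saved in of->vm_ops above and wrapped
 * by kernfs_vm_ops so that each vma callback runs under an active
 * reference.  A minimal sketch, with hypothetical names, mapping one
 * provider-owned page (this one installs no vm_ops of its own):
 *
 *	static int example_mmap(struct kernfs_open_file *of,
 *				struct vm_area_struct *vma)
 *	{
 *		struct example_state *st = of->kn->priv;
 *
 *		if (vma->vm_end - vma->vm_start > PAGE_SIZE)
 *			return -EINVAL;
 *		return remap_pfn_range(vma, vma->vm_start,
 *				       page_to_pfn(st->page), PAGE_SIZE,
 *				       vma->vm_page_prot);
 *	}
 */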
521 
522 /**
523  *	kernfs_get_open_node - get or create kernfs_open_node
524  *	@kn: target kernfs_node
525  *	@of: kernfs_open_file for this instance of open
526  *
527  *	If @kn->attr.open exists, increment its reference count; otherwise,
528  *	create one.  @of is chained to the files list.
529  *
530  *	LOCKING:
531  *	Kernel thread context (may sleep).
532  *
533  *	RETURNS:
534  *	0 on success, -errno on failure.
535  */
536 static int kernfs_get_open_node(struct kernfs_node *kn,
537 				struct kernfs_open_file *of)
538 {
539 	struct kernfs_open_node *on, *new_on = NULL;
540 
541  retry:
542 	mutex_lock(&kernfs_open_file_mutex);
543 	spin_lock_irq(&kernfs_open_node_lock);
544 
545 	if (!kn->attr.open && new_on) {
546 		kn->attr.open = new_on;
547 		new_on = NULL;
548 	}
549 
550 	on = kn->attr.open;
551 	if (on) {
552 		atomic_inc(&on->refcnt);
553 		list_add_tail(&of->list, &on->files);
554 	}
555 
556 	spin_unlock_irq(&kernfs_open_node_lock);
557 	mutex_unlock(&kernfs_open_file_mutex);
558 
559 	if (on) {
560 		kfree(new_on);
561 		return 0;
562 	}
563 
564 	/* not there, initialize a new one and retry */
565 	new_on = kmalloc(sizeof(*new_on), GFP_KERNEL);
566 	if (!new_on)
567 		return -ENOMEM;
568 
569 	atomic_set(&new_on->refcnt, 0);
570 	atomic_set(&new_on->event, 1);
571 	init_waitqueue_head(&new_on->poll);
572 	INIT_LIST_HEAD(&new_on->files);
573 	goto retry;
574 }
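
/*
 * Note the retry pattern above: the first pass runs with @new_on == NULL
 * and can only succeed if an open_node is already installed.  Otherwise
 * one is allocated outside the locks and the whole thing is retried; if
 * another opener installed one in the meantime, the spare allocation is
 * simply freed.
 */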
575 
576 /**
577  *	kernfs_put_open_node - put kernfs_open_node
578  *	@kn: target kernfs_node
579  *	@of: associated kernfs_open_file
580  *
581  *	Put @kn->attr.open and unlink @of from the files list.  If
582  *	reference count reaches zero, disassociate and free it.
583  *
584  *	LOCKING:
585  *	None.
586  */
587 static void kernfs_put_open_node(struct kernfs_node *kn,
588 				 struct kernfs_open_file *of)
589 {
590 	struct kernfs_open_node *on = kn->attr.open;
591 	unsigned long flags;
592 
593 	mutex_lock(&kernfs_open_file_mutex);
594 	spin_lock_irqsave(&kernfs_open_node_lock, flags);
595 
596 	if (of)
597 		list_del(&of->list);
598 
599 	if (atomic_dec_and_test(&on->refcnt))
600 		kn->attr.open = NULL;
601 	else
602 		on = NULL;
603 
604 	spin_unlock_irqrestore(&kernfs_open_node_lock, flags);
605 	mutex_unlock(&kernfs_open_file_mutex);
606 
607 	kfree(on);
608 }
609 
610 static int kernfs_fop_open(struct inode *inode, struct file *file)
611 {
612 	struct kernfs_node *kn = file->f_path.dentry->d_fsdata;
613 	const struct kernfs_ops *ops;
614 	struct kernfs_open_file *of;
615 	bool has_read, has_write, has_mmap;
616 	int error = -EACCES;
617 
618 	if (!kernfs_get_active(kn))
619 		return -ENODEV;
620 
621 	ops = kernfs_ops(kn);
622 
623 	has_read = ops->seq_show || ops->read || ops->mmap;
624 	has_write = ops->write || ops->mmap;
625 	has_mmap = ops->mmap;
626 
627 	/* check perms and supported operations */
628 	if ((file->f_mode & FMODE_WRITE) &&
629 	    (!(inode->i_mode & S_IWUGO) || !has_write))
630 		goto err_out;
631 
632 	if ((file->f_mode & FMODE_READ) &&
633 	    (!(inode->i_mode & S_IRUGO) || !has_read))
634 		goto err_out;
635 
636 	/* allocate a kernfs_open_file for the file */
637 	error = -ENOMEM;
638 	of = kzalloc(sizeof(struct kernfs_open_file), GFP_KERNEL);
639 	if (!of)
640 		goto err_out;
641 
642 	/*
643 	 * The following is done to give a different lockdep key to
644 	 * @of->mutex for files which implement mmap.  This is a rather
645 	 * crude way to avoid false positive lockdep warning around
646 	 * mm->mmap_sem - mmap nests @of->mutex under mm->mmap_sem and
647 	 * reading /sys/block/sda/trace/act_mask grabs sr_mutex, under
648 	 * which mm->mmap_sem nests, while holding @of->mutex.  As each
649 	 * open file has a separate mutex, it's okay as long as those don't
650 	 * happen on the same file.  At this point, we can't easily give
651 	 * each file a separate locking class.  Let's differentiate on
652 	 * whether the file has mmap or not for now.
653 	 *
654 	 * Both paths of the branch look the same.  They're supposed to
655 	 * look that way and give @of->mutex different static lockdep keys.
656 	 */
657 	if (has_mmap)
658 		mutex_init(&of->mutex);
659 	else
660 		mutex_init(&of->mutex);
661 
662 	of->kn = kn;
663 	of->file = file;
664 
665 	/*
666 	 * Write path needs atomic_write_len outside active reference.
667 	 * Cache it in open_file.  See kernfs_fop_write() for details.
668 	 */
669 	of->atomic_write_len = ops->atomic_write_len;
670 
671 	/*
672 	 * Always instantiate seq_file even if read access doesn't use
673 	 * seq_file or is not requested.  This unifies private data access,
674 	 * and readable regular files are the vast majority anyway.
675 	 */
676 	if (ops->seq_show)
677 		error = seq_open(file, &kernfs_seq_ops);
678 	else
679 		error = seq_open(file, NULL);
680 	if (error)
681 		goto err_free;
682 
683 	((struct seq_file *)file->private_data)->private = of;
684 
685 	/* seq_file clears PWRITE unconditionally, restore it if WRITE */
686 	if (file->f_mode & FMODE_WRITE)
687 		file->f_mode |= FMODE_PWRITE;
688 
689 	/* make sure we have open node struct */
690 	error = kernfs_get_open_node(kn, of);
691 	if (error)
692 		goto err_close;
693 
694 	/* open succeeded, put active references */
695 	kernfs_put_active(kn);
696 	return 0;
697 
698 err_close:
699 	seq_release(inode, file);
700 err_free:
701 	kfree(of);
702 err_out:
703 	kernfs_put_active(kn);
704 	return error;
705 }
706 
707 static int kernfs_fop_release(struct inode *inode, struct file *filp)
708 {
709 	struct kernfs_node *kn = filp->f_path.dentry->d_fsdata;
710 	struct kernfs_open_file *of = kernfs_of(filp);
711 
712 	kernfs_put_open_node(kn, of);
713 	seq_release(inode, filp);
714 	kfree(of);
715 
716 	return 0;
717 }
718 
719 void kernfs_unmap_bin_file(struct kernfs_node *kn)
720 {
721 	struct kernfs_open_node *on;
722 	struct kernfs_open_file *of;
723 
724 	if (!(kn->flags & KERNFS_HAS_MMAP))
725 		return;
726 
727 	spin_lock_irq(&kernfs_open_node_lock);
728 	on = kn->attr.open;
729 	if (on)
730 		atomic_inc(&on->refcnt);
731 	spin_unlock_irq(&kernfs_open_node_lock);
732 	if (!on)
733 		return;
734 
735 	mutex_lock(&kernfs_open_file_mutex);
736 	list_for_each_entry(of, &on->files, list) {
737 		struct inode *inode = file_inode(of->file);
738 		unmap_mapping_range(inode->i_mapping, 0, 0, 1);
739 	}
740 	mutex_unlock(&kernfs_open_file_mutex);
741 
742 	kernfs_put_open_node(kn, NULL);
743 }
744 
745 /*
746  * Kernfs attribute files are pollable.  The idea is that you read
747  * the content and then you use 'poll' or 'select' to wait for
748  * the content to change.  When the content changes (assuming the
749  * manager for the kobject supports notification), poll will
750  * return POLLERR|POLLPRI, and select will return the fd whether
751  * it is waiting for read, write, or exceptions.
752  * Once poll/select indicates that the value has changed, you
753  * need to close and re-open the file, or seek to 0 and read again.
754  * Reminder: this only works for attributes which actively support
755  * it, and it is not possible to test an attribute from userspace
756  * to see if it supports poll (Neither 'poll' nor 'select' returns
757  * an appropriate error code).  When in doubt, set a suitable timeout value.
758  */
759 static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
760 {
761 	struct kernfs_open_file *of = kernfs_of(filp);
762 	struct kernfs_node *kn = filp->f_path.dentry->d_fsdata;
763 	struct kernfs_open_node *on = kn->attr.open;
764 
765 	/* need parent for the kobj, grab both */
766 	if (!kernfs_get_active(kn))
767 		goto trigger;
768 
769 	poll_wait(filp, &on->poll, wait);
770 
771 	kernfs_put_active(kn);
772 
773 	if (of->event != atomic_read(&on->event))
774 		goto trigger;
775 
776 	return DEFAULT_POLLMASK;
777 
778  trigger:
779 	return DEFAULT_POLLMASK|POLLERR|POLLPRI;
780 }
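
/*
 * Example: the userspace side of the protocol described above.  A
 * minimal sketch (error handling omitted, the attribute path is made
 * up) that waits for one change notification and re-reads the value:
 *
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[4096];
 *		struct pollfd pfd = { .events = POLLPRI };
 *
 *		pfd.fd = open("/sys/kernel/example_attr", O_RDONLY);
 *		read(pfd.fd, buf, sizeof(buf));	// consume current value
 *		poll(&pfd, 1, -1);		// blocks until kernfs_notify()
 *		lseek(pfd.fd, 0, SEEK_SET);
 *		read(pfd.fd, buf, sizeof(buf));	// fetch the updated value
 *		return 0;
 *	}
 *
 * The provider side triggers the wakeup by calling kernfs_notify() on
 * the node after updating whatever the file shows.
 */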
781 
782 /**
783  * kernfs_notify - notify a kernfs file
784  * @kn: file to notify
785  *
786  * Notify @kn such that poll(2) on @kn wakes up.
787  */
788 void kernfs_notify(struct kernfs_node *kn)
789 {
790 	struct kernfs_open_node *on;
791 	unsigned long flags;
792 
793 	spin_lock_irqsave(&kernfs_open_node_lock, flags);
794 
795 	if (!WARN_ON(kernfs_type(kn) != KERNFS_FILE)) {
796 		on = kn->attr.open;
797 		if (on) {
798 			atomic_inc(&on->event);
799 			wake_up_interruptible(&on->poll);
800 		}
801 	}
802 
803 	spin_unlock_irqrestore(&kernfs_open_node_lock, flags);
804 }
805 EXPORT_SYMBOL_GPL(kernfs_notify);
806 
807 const struct file_operations kernfs_file_fops = {
808 	.read		= kernfs_fop_read,
809 	.write		= kernfs_fop_write,
810 	.llseek		= generic_file_llseek,
811 	.mmap		= kernfs_fop_mmap,
812 	.open		= kernfs_fop_open,
813 	.release	= kernfs_fop_release,
814 	.poll		= kernfs_fop_poll,
815 };
816 
817 /**
818  * __kernfs_create_file - kernfs internal function to create a file
819  * @parent: directory to create the file in
820  * @name: name of the file
821  * @mode: mode of the file
822  * @size: size of the file
823  * @ops: kernfs operations for the file
824  * @priv: private data for the file
825  * @ns: optional namespace tag of the file
826  * @static_name: don't copy file name
827  * @key: lockdep key for the file's active_ref, %NULL to disable lockdep
828  *
829  * Returns the created node on success, ERR_PTR() value on error.
830  */
831 struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
832 					 const char *name,
833 					 umode_t mode, loff_t size,
834 					 const struct kernfs_ops *ops,
835 					 void *priv, const void *ns,
836 					 bool name_is_static,
837 					 struct lock_class_key *key)
838 {
839 	struct kernfs_node *kn;
840 	unsigned flags;
841 	int rc;
842 
843 	flags = KERNFS_FILE;
844 	if (name_is_static)
845 		flags |= KERNFS_STATIC_NAME;
846 
847 	kn = kernfs_new_node(parent, name, (mode & S_IALLUGO) | S_IFREG, flags);
848 	if (!kn)
849 		return ERR_PTR(-ENOMEM);
850 
851 	kn->attr.ops = ops;
852 	kn->attr.size = size;
853 	kn->ns = ns;
854 	kn->priv = priv;
855 
856 #ifdef CONFIG_DEBUG_LOCK_ALLOC
857 	if (key) {
858 		lockdep_init_map(&kn->dep_map, "s_active", key, 0);
859 		kn->flags |= KERNFS_LOCKDEP;
860 	}
861 #endif
862 
863 	/*
864 	 * kn->attr.ops is accessible only while holding active ref.  We
865 	 * need to know whether some ops are implemented outside active
866 	 * ref.  Cache their existence in flags.
867 	 */
868 	if (ops->seq_show)
869 		kn->flags |= KERNFS_HAS_SEQ_SHOW;
870 	if (ops->mmap)
871 		kn->flags |= KERNFS_HAS_MMAP;
872 
873 	rc = kernfs_add_one(kn);
874 	if (rc) {
875 		kernfs_put(kn);
876 		return ERR_PTR(rc);
877 	}
878 	return kn;
879 }
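
/*
 * Example: creating a simple read-only attribute through this function
 * (most callers go through the wrappers in include/linux/kernfs.h, which
 * end up here).  A minimal sketch with hypothetical names, run somewhere
 * with a valid @parent node, exposing an int through ->priv:
 *
 *	static int example_show(struct seq_file *sf, void *v)
 *	{
 *		struct kernfs_open_file *of = sf->private;
 *
 *		seq_printf(sf, "%d\n", *(int *)of->kn->priv);
 *		return 0;
 *	}
 *
 *	static const struct kernfs_ops example_show_ops = {
 *		.seq_show	= example_show,
 *	};
 *
 *	static int example_value;
 *
 *	struct kernfs_node *kn;
 *
 *	kn = __kernfs_create_file(parent, "example", 0444, 0,
 *				  &example_show_ops, &example_value,
 *				  NULL, false, NULL);
 *	if (IS_ERR(kn))
 *		return PTR_ERR(kn);
 */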
880