// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/kernfs/file.c - kernfs file implementation
 *
 * Copyright (c) 2001-3 Patrick Mochel
 * Copyright (c) 2007 SUSE Linux Products GmbH
 * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
 */

#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/fsnotify.h>
#include <linux/uio.h>

#include "kernfs-internal.h"

struct kernfs_open_node {
	struct rcu_head		rcu_head;
	atomic_t		event;
	wait_queue_head_t	poll;
	struct list_head	files; /* goes through kernfs_open_file.list */
	unsigned int		nr_mmapped;
	unsigned int		nr_to_release;
};

/*
 * kernfs_notify() may be called from any context and bounces notifications
 * through a work item.  To minimize space overhead in kernfs_node, the
 * pending queue is implemented as a singly linked list of kernfs_nodes.
 * The list is terminated with the self pointer so that whether a
 * kernfs_node is on the list or not can be determined by testing the next
 * pointer for %NULL.
 */
#define KERNFS_NOTIFY_EOL			((void *)&kernfs_notify_list)

static DEFINE_SPINLOCK(kernfs_notify_lock);
static struct kernfs_node *kernfs_notify_list = KERNFS_NOTIFY_EOL;
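
/*
 * For illustration only (not part of the kernel): the EOL encoding above
 * makes list membership testable without a dedicated flag.  A minimal
 * sketch of the idea, using a hypothetical node type:
 *
 *	struct node { struct node *next; };	// next == NULL: not queued
 *	struct node *head = EOL;		// EOL: sentinel, never NULL
 *
 *	bool queued(struct node *n) { return n->next != NULL; }
 *
 *	void push(struct node *n)
 *	{
 *		if (!queued(n)) {	// queue each node at most once
 *			n->next = head;	// last entry points at EOL, not NULL
 *			head = n;
 *		}
 *	}
 */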

static inline struct mutex *kernfs_open_file_mutex_ptr(struct kernfs_node *kn)
{
	int idx = hash_ptr(kn, NR_KERNFS_LOCK_BITS);

	return &kernfs_locks->open_file_mutex[idx];
}

static inline struct mutex *kernfs_open_file_mutex_lock(struct kernfs_node *kn)
{
	struct mutex *lock;

	lock = kernfs_open_file_mutex_ptr(kn);

	mutex_lock(lock);

	return lock;
}
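
/*
 * Usage sketch, mirroring the callers later in this file: the helper
 * returns the mutex it took so the caller can unlock the same hashed
 * instance afterwards.
 *
 *	struct mutex *mutex;
 *
 *	mutex = kernfs_open_file_mutex_lock(kn);
 *	... manipulate kn->attr.open and on->files ...
 *	mutex_unlock(mutex);
 *
 * Hashing @kn over a small shared mutex array bounds memory overhead
 * while keeping contention between unrelated nodes unlikely.
 */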

/**
 * of_on - Get the kernfs_open_node of the specified kernfs_open_file
 * @of: target kernfs_open_file
 *
 * Return: the kernfs_open_node of the kernfs_open_file
 */
static struct kernfs_open_node *of_on(struct kernfs_open_file *of)
{
	return rcu_dereference_protected(of->kn->attr.open,
					 !list_empty(&of->list));
}

/**
 * kernfs_deref_open_node_locked - Get kernfs_open_node corresponding to @kn
 *
 * @kn: target kernfs_node.
 *
 * Fetch and return ->attr.open of @kn while the caller holds
 * kernfs_open_file_mutex_ptr(kn).
 *
 * Updates of ->attr.open happen under kernfs_open_file_mutex_ptr(kn), so
 * while the caller holds this mutex no updater can change ->attr.open,
 * which means we can safely dereference it outside an RCU read-side
 * critical section.
 *
 * Return: @kn->attr.open when kernfs_open_file_mutex is held.
 */
static struct kernfs_open_node *
kernfs_deref_open_node_locked(struct kernfs_node *kn)
{
	return rcu_dereference_protected(kn->attr.open,
				lockdep_is_held(kernfs_open_file_mutex_ptr(kn)));
}

static struct kernfs_open_file *kernfs_of(struct file *file)
{
	return ((struct seq_file *)file->private_data)->private;
}

/*
 * Determine the kernfs_ops for the given kernfs_node.  This function must
 * be called while holding an active reference.
 */
static const struct kernfs_ops *kernfs_ops(struct kernfs_node *kn)
{
	if (kn->flags & KERNFS_LOCKDEP)
		lockdep_assert_held(kn);
	return kn->attr.ops;
}

/*
 * As kernfs_seq_stop() is also called after kernfs_seq_start() or
 * kernfs_seq_next() failure, it needs to distinguish whether it's stopping
 * a seq_file iteration which is fully initialized with an active reference
 * or an aborted kernfs_seq_start() due to get_active failure.  The
 * position pointer is the only context for each seq_file iteration and
 * thus the stop condition should be encoded in it.  As the return value is
 * directly visible to userland, ERR_PTR(-ENODEV) is the only acceptable
 * choice to indicate get_active failure.
 *
 * Unfortunately, this is complicated by the optional custom seq_file
 * operations, which may return ERR_PTR(-ENODEV) too.  kernfs_seq_stop()
 * can't tell whether ERR_PTR(-ENODEV) came from get_active failure or from
 * the custom seq_file operations, and thus can't decide from that value
 * alone whether put_active should be performed.
 *
 * This is worked around by factoring the custom seq_stop() and put_active
 * parts out into kernfs_seq_stop_active(): kernfs_seq_stop() skips it when
 * passed ERR_PTR(-ENODEV), while kernfs_seq_start() and kernfs_seq_next()
 * invoke it directly when a custom seq_file operation fails with
 * ERR_PTR(-ENODEV).  This ensures that kernfs_seq_stop_active() is skipped
 * only after get_active failure.
 */
static void kernfs_seq_stop_active(struct seq_file *sf, void *v)
{
	struct kernfs_open_file *of = sf->private;
	const struct kernfs_ops *ops = kernfs_ops(of->kn);

	if (ops->seq_stop)
		ops->seq_stop(sf, v);
	kernfs_put_active(of->kn);
}
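
/*
 * Sketch of a custom seq iterator from a hypothetical kernfs user,
 * illustrating the second ERR_PTR(-ENODEV) source described above
 * (foo_seq_start(), foo_dev_find() and foo_dev_nth_record() are made-up
 * names):
 *
 *	static void *foo_seq_start(struct seq_file *sf, loff_t *ppos)
 *	{
 *		struct foo_dev *dev = foo_dev_find(sf);
 *
 *		if (!dev)			// device already gone
 *			return ERR_PTR(-ENODEV);
 *		return foo_dev_nth_record(dev, *ppos);
 *	}
 *
 * When this returns ERR_PTR(-ENODEV), kernfs_seq_start() below has
 * already taken the active reference, so it calls
 * kernfs_seq_stop_active() itself before propagating the error.
 */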

static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
{
	struct kernfs_open_file *of = sf->private;
	const struct kernfs_ops *ops;

	/*
	 * @of->mutex nests outside active ref and is primarily to ensure that
	 * the ops aren't called concurrently for the same open file.
	 */
	mutex_lock(&of->mutex);
	if (!kernfs_get_active(of->kn))
		return ERR_PTR(-ENODEV);

	ops = kernfs_ops(of->kn);
	if (ops->seq_start) {
		void *next = ops->seq_start(sf, ppos);
		/* see the comment above kernfs_seq_stop_active() */
		if (next == ERR_PTR(-ENODEV))
			kernfs_seq_stop_active(sf, next);
		return next;
	}
	return single_start(sf, ppos);
}

static void *kernfs_seq_next(struct seq_file *sf, void *v, loff_t *ppos)
{
	struct kernfs_open_file *of = sf->private;
	const struct kernfs_ops *ops = kernfs_ops(of->kn);

	if (ops->seq_next) {
		void *next = ops->seq_next(sf, v, ppos);
		/* see the comment above kernfs_seq_stop_active() */
		if (next == ERR_PTR(-ENODEV))
			kernfs_seq_stop_active(sf, next);
		return next;
	} else {
		/*
		 * The same behavior and code as single_open(), always
		 * terminate after the initial read.
		 */
		++*ppos;
		return NULL;
	}
}

static void kernfs_seq_stop(struct seq_file *sf, void *v)
{
	struct kernfs_open_file *of = sf->private;

	if (v != ERR_PTR(-ENODEV))
		kernfs_seq_stop_active(sf, v);
	mutex_unlock(&of->mutex);
}

static int kernfs_seq_show(struct seq_file *sf, void *v)
{
	struct kernfs_open_file *of = sf->private;

	of->event = atomic_read(&of_on(of)->event);

	return of->kn->attr.ops->seq_show(sf, v);
}

static const struct seq_operations kernfs_seq_ops = {
	.start = kernfs_seq_start,
	.next = kernfs_seq_next,
	.stop = kernfs_seq_stop,
	.show = kernfs_seq_show,
};
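
/*
 * For reference, a minimal sketch of what a kernfs user plugs into this
 * machinery (hypothetical names; sysfs and cgroup wire this up for real):
 *
 *	static int foo_seq_show(struct seq_file *sf, void *v)
 *	{
 *		struct kernfs_open_file *of = sf->private;
 *
 *		// @of->kn->priv was set at __kernfs_create_file() time
 *		seq_printf(sf, "%d\n", *(int *)of->kn->priv);
 *		return 0;
 *	}
 *
 *	static const struct kernfs_ops foo_ops = {
 *		.seq_show	= foo_seq_show,
 *	};
 *
 * With only .seq_show set, reads go through seq_read_iter() and the
 * kernfs_seq_ops above; .seq_start/next/stop default to single-record
 * iteration.
 */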

/*
 * As reading a bin file can have side effects, the exact offset and bytes
 * specified in the read(2) call should be passed to the read callback,
 * making it difficult to use seq_file.  Implement simplistic custom
 * buffering for bin files.
 */
static ssize_t kernfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct kernfs_open_file *of = kernfs_of(iocb->ki_filp);
	ssize_t len = min_t(size_t, iov_iter_count(iter), PAGE_SIZE);
	const struct kernfs_ops *ops;
	char *buf;

	buf = of->prealloc_buf;
	if (buf)
		mutex_lock(&of->prealloc_mutex);
	else
		buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/*
	 * @of->mutex nests outside active ref and is primarily to ensure that
	 * the ops aren't called concurrently for the same open file.
	 */
	mutex_lock(&of->mutex);
	if (!kernfs_get_active(of->kn)) {
		len = -ENODEV;
		mutex_unlock(&of->mutex);
		goto out_free;
	}

	of->event = atomic_read(&of_on(of)->event);

	ops = kernfs_ops(of->kn);
	if (ops->read)
		len = ops->read(of, buf, len, iocb->ki_pos);
	else
		len = -EINVAL;

	kernfs_put_active(of->kn);
	mutex_unlock(&of->mutex);

	if (len < 0)
		goto out_free;

	if (copy_to_iter(buf, len, iter) != len) {
		len = -EFAULT;
		goto out_free;
	}

	iocb->ki_pos += len;

 out_free:
	if (buf == of->prealloc_buf)
		mutex_unlock(&of->prealloc_mutex);
	else
		kfree(buf);
	return len;
}

static ssize_t kernfs_fop_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	if (kernfs_of(iocb->ki_filp)->kn->flags & KERNFS_HAS_SEQ_SHOW)
		return seq_read_iter(iocb, iter);
	return kernfs_file_read_iter(iocb, iter);
}
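
/*
 * Sketch of a ->read based (non-seq_file) handler as dispatched by the
 * path above, with hypothetical names in the spirit of binary attributes:
 *
 *	static ssize_t foo_read(struct kernfs_open_file *of, char *buf,
 *				size_t bytes, loff_t off)
 *	{
 *		// @buf is a kernel buffer of at most PAGE_SIZE bytes (or
 *		// of->prealloc_buf); return bytes copied or -errno.
 *		return foo_hw_read_region(of->kn->priv, buf, bytes, off);
 *	}
 *
 * The exact (bytes, off) pair from the read(2) call is forwarded, which
 * matters when reading has side effects on the underlying object.
 */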

/*
 * Copy data in from userland and pass it to the matching kernfs write
 * operation.
 *
 * There is no easy way for us to know if userspace is only doing a partial
 * write, so partial writes aren't supported.  We expect the entire buffer
 * to come in on the first write.  Hint: if you're writing a value, first
 * read the file, modify only the value you're changing, then write the
 * entire buffer back.
 */
static ssize_t kernfs_fop_write_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct kernfs_open_file *of = kernfs_of(iocb->ki_filp);
	ssize_t len = iov_iter_count(iter);
	const struct kernfs_ops *ops;
	char *buf;

	if (of->atomic_write_len) {
		if (len > of->atomic_write_len)
			return -E2BIG;
	} else {
		len = min_t(size_t, len, PAGE_SIZE);
	}

	buf = of->prealloc_buf;
	if (buf)
		mutex_lock(&of->prealloc_mutex);
	else
		buf = kmalloc(len + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_iter(buf, len, iter) != len) {
		len = -EFAULT;
		goto out_free;
	}
	buf[len] = '\0';	/* guarantee string termination */

	/*
	 * @of->mutex nests outside active ref and is primarily to ensure that
	 * the ops aren't called concurrently for the same open file.
	 */
	mutex_lock(&of->mutex);
	if (!kernfs_get_active(of->kn)) {
		mutex_unlock(&of->mutex);
		len = -ENODEV;
		goto out_free;
	}

	ops = kernfs_ops(of->kn);
	if (ops->write)
		len = ops->write(of, buf, len, iocb->ki_pos);
	else
		len = -EINVAL;

	kernfs_put_active(of->kn);
	mutex_unlock(&of->mutex);

	if (len > 0)
		iocb->ki_pos += len;

out_free:
	if (buf == of->prealloc_buf)
		mutex_unlock(&of->prealloc_mutex);
	else
		kfree(buf);
	return len;
}
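
/*
 * Sketch of a matching ->write handler (hypothetical names).  @buf arrives
 * NUL-terminated courtesy of the code above, so string parsing is safe:
 *
 *	static ssize_t foo_store(struct kernfs_open_file *of, char *buf,
 *				 size_t bytes, loff_t off)
 *	{
 *		int val, ret;
 *
 *		ret = kstrtoint(buf, 0, &val);	// whole value in one write
 *		if (ret)
 *			return ret;
 *		*(int *)of->kn->priv = val;
 *		return bytes;			// consume everything
 *	}
 *
 * Setting .atomic_write_len in kernfs_ops rejects larger writes with
 * -E2BIG instead of silently capping them at PAGE_SIZE.
 */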

static void kernfs_vma_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);

	if (!of->vm_ops)
		return;

	if (!kernfs_get_active(of->kn))
		return;

	if (of->vm_ops->open)
		of->vm_ops->open(vma);

	kernfs_put_active(of->kn);
}

static vm_fault_t kernfs_vma_fault(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	vm_fault_t ret;

	if (!of->vm_ops)
		return VM_FAULT_SIGBUS;

	if (!kernfs_get_active(of->kn))
		return VM_FAULT_SIGBUS;

	ret = VM_FAULT_SIGBUS;
	if (of->vm_ops->fault)
		ret = of->vm_ops->fault(vmf);

	kernfs_put_active(of->kn);
	return ret;
}

static vm_fault_t kernfs_vma_page_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	vm_fault_t ret;

	if (!of->vm_ops)
		return VM_FAULT_SIGBUS;

	if (!kernfs_get_active(of->kn))
		return VM_FAULT_SIGBUS;

	ret = 0;
	if (of->vm_ops->page_mkwrite)
		ret = of->vm_ops->page_mkwrite(vmf);
	else
		file_update_time(file);

	kernfs_put_active(of->kn);
	return ret;
}

static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
			     void *buf, int len, int write)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return -EINVAL;

	if (!kernfs_get_active(of->kn))
		return -EINVAL;

	ret = -EINVAL;
	if (of->vm_ops->access)
		ret = of->vm_ops->access(vma, addr, buf, len, write);

	kernfs_put_active(of->kn);
	return ret;
}

static const struct vm_operations_struct kernfs_vm_ops = {
	.open		= kernfs_vma_open,
	.fault		= kernfs_vma_fault,
	.page_mkwrite	= kernfs_vma_page_mkwrite,
	.access		= kernfs_vma_access,
};

static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct kernfs_open_file *of = kernfs_of(file);
	const struct kernfs_ops *ops;
	int rc;

	/*
	 * The mmap path and @of->mutex are prone to triggering spurious
	 * lockdep warnings and we don't want to add a spurious locking
	 * dependency between the two.  Check whether mmap is actually
	 * implemented without grabbing @of->mutex by testing the HAS_MMAP
	 * flag.  See the comment in kernfs_fop_open() for more details.
	 */
	if (!(of->kn->flags & KERNFS_HAS_MMAP))
		return -ENODEV;

	mutex_lock(&of->mutex);

	rc = -ENODEV;
	if (!kernfs_get_active(of->kn))
		goto out_unlock;

	ops = kernfs_ops(of->kn);
	rc = ops->mmap(of, vma);
	if (rc)
		goto out_put;

	/*
	 * PowerPC's pci_mmap of legacy_mem uses shmem_zero_setup()
	 * to satisfy versions of X which crash if the mmap fails: that
	 * substitutes a new vm_file, and we don't then want bin_vm_ops.
	 */
	if (vma->vm_file != file)
		goto out_put;

	rc = -EINVAL;
	if (of->mmapped && of->vm_ops != vma->vm_ops)
		goto out_put;

	/*
	 * It is not possible to successfully wrap close, so return an
	 * error if the vm_ops being installed implement it.
	 */
	if (vma->vm_ops && vma->vm_ops->close)
		goto out_put;

	rc = 0;
	if (!of->mmapped) {
		of->mmapped = true;
		of_on(of)->nr_mmapped++;
		of->vm_ops = vma->vm_ops;
	}
	vma->vm_ops = &kernfs_vm_ops;
out_put:
	kernfs_put_active(of->kn);
out_unlock:
	mutex_unlock(&of->mutex);

	return rc;
}
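
/*
 * Sketch of an ops->mmap implementation (hypothetical names).  The
 * callback installs its own vm_ops on @vma; kernfs then stashes them in
 * @of->vm_ops and substitutes kernfs_vm_ops, which forwards each callback
 * under an active reference:
 *
 *	static int foo_mmap(struct kernfs_open_file *of,
 *			    struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *dev = of->kn->priv;
 *
 *		if (vma->vm_end - vma->vm_start > foo_region_size(dev))
 *			return -EINVAL;
 *		vma->vm_ops = &foo_vm_ops;	// must not implement .close
 *		return 0;
 *	}
 */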

/**
 *	kernfs_get_open_node - get or create kernfs_open_node
 *	@kn: target kernfs_node
 *	@of: kernfs_open_file for this instance of open
 *
 *	If @kn->attr.open exists, chain @of to its files list; otherwise,
 *	create one first.
 *
 *	Locking:
 *	Kernel thread context (may sleep).
 *
 *	Return:
 *	%0 on success, -errno on failure.
 */
static int kernfs_get_open_node(struct kernfs_node *kn,
				struct kernfs_open_file *of)
{
	struct kernfs_open_node *on;
	struct mutex *mutex;

	mutex = kernfs_open_file_mutex_lock(kn);
	on = kernfs_deref_open_node_locked(kn);

	if (!on) {
		/* not there, initialize a new one */
		on = kzalloc(sizeof(*on), GFP_KERNEL);
		if (!on) {
			mutex_unlock(mutex);
			return -ENOMEM;
		}
		atomic_set(&on->event, 1);
		init_waitqueue_head(&on->poll);
		INIT_LIST_HEAD(&on->files);
		rcu_assign_pointer(kn->attr.open, on);
	}

	list_add_tail(&of->list, &on->files);
	if (kn->flags & KERNFS_HAS_RELEASE)
		on->nr_to_release++;

	mutex_unlock(mutex);
	return 0;
}

/**
 *	kernfs_unlink_open_file - Unlink @of from @kn.
 *
 *	@kn: target kernfs_node
 *	@of: associated kernfs_open_file
 *	@open_failed: ->open() failed, cancel ->release()
 *
 *	Unlink @of from list of @kn's associated open files. If list of
 *	associated open files becomes empty, disassociate and free
 *	kernfs_open_node.
 *
 *	LOCKING:
 *	None.
 */
static void kernfs_unlink_open_file(struct kernfs_node *kn,
				    struct kernfs_open_file *of,
				    bool open_failed)
{
	struct kernfs_open_node *on;
	struct mutex *mutex;

	mutex = kernfs_open_file_mutex_lock(kn);

	on = kernfs_deref_open_node_locked(kn);
	if (!on) {
		mutex_unlock(mutex);
		return;
	}

	if (of) {
		if (kn->flags & KERNFS_HAS_RELEASE) {
			WARN_ON_ONCE(of->released == open_failed);
			if (open_failed)
				on->nr_to_release--;
		}
		if (of->mmapped)
			on->nr_mmapped--;
		list_del(&of->list);
	}

	if (list_empty(&on->files)) {
		rcu_assign_pointer(kn->attr.open, NULL);
		kfree_rcu(on, rcu_head);
	}

	mutex_unlock(mutex);
}
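
/*
 * Note the RCU discipline around @kn->attr.open seen above: writers
 * publish with rcu_assign_pointer() and retire with kfree_rcu(), both
 * under the hashed open_file mutex, while lockless readers such as
 * kernfs_notify() below use the classic pattern:
 *
 *	rcu_read_lock();
 *	on = rcu_dereference(kn->attr.open);
 *	if (on)
 *		... read-only peek, e.g. wake up on->poll ...
 *	rcu_read_unlock();
 *
 * This keeps the notify path safe without taking any kernfs mutex.
 */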

static int kernfs_fop_open(struct inode *inode, struct file *file)
{
	struct kernfs_node *kn = inode->i_private;
	struct kernfs_root *root = kernfs_root(kn);
	const struct kernfs_ops *ops;
	struct kernfs_open_file *of;
	bool has_read, has_write, has_mmap;
	int error = -EACCES;

	if (!kernfs_get_active(kn))
		return -ENODEV;

	ops = kernfs_ops(kn);

	has_read = ops->seq_show || ops->read || ops->mmap;
	has_write = ops->write || ops->mmap;
	has_mmap = ops->mmap;

	/* see the flag definition for details */
	if (root->flags & KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK) {
		if ((file->f_mode & FMODE_WRITE) &&
		    (!(inode->i_mode & S_IWUGO) || !has_write))
			goto err_out;

		if ((file->f_mode & FMODE_READ) &&
		    (!(inode->i_mode & S_IRUGO) || !has_read))
			goto err_out;
	}

	/* allocate a kernfs_open_file for the file */
	error = -ENOMEM;
	of = kzalloc(sizeof(struct kernfs_open_file), GFP_KERNEL);
	if (!of)
		goto err_out;

	/*
	 * The following is done to give a different lockdep key to
	 * @of->mutex for files which implement mmap.  This is a rather
	 * crude way to avoid false positive lockdep warning around
	 * mm->mmap_lock - mmap nests @of->mutex under mm->mmap_lock and
	 * reading /sys/block/sda/trace/act_mask grabs sr_mutex, under
	 * which mm->mmap_lock nests, while holding @of->mutex.  As each
	 * open file has a separate mutex, it's okay as long as those don't
	 * happen on the same file.  At this point, we can't easily give
	 * each file a separate locking class.  Let's differentiate on
	 * whether the file has mmap or not for now.
	 *
	 * For similar reasons, writable and readonly files are given
	 * different lockdep keys, because the writable file /sys/power/resume
	 * may call vfs lookup helpers for arbitrary paths and readonly files
	 * can be read by overlayfs from vfs helpers when sysfs is a lower
	 * layer of overlayfs.
	 *
	 * All three branches below look the same, and they're supposed to:
	 * each mutex_init() call site gives @of->mutex a different static
	 * lockdep key.
	 */
	if (has_mmap)
		mutex_init(&of->mutex);
	else if (file->f_mode & FMODE_WRITE)
		mutex_init(&of->mutex);
	else
		mutex_init(&of->mutex);

	of->kn = kn;
	of->file = file;

	/*
	 * The write path needs to access ->atomic_write_len outside the
	 * active reference.  Cache it in the open_file.  See
	 * kernfs_fop_write_iter() for details.
	 */
	of->atomic_write_len = ops->atomic_write_len;

	error = -EINVAL;
	/*
	 * ->seq_show is incompatible with ->prealloc,
	 * as seq_read does its own allocation.
	 * ->read must be used instead.
	 */
	if (ops->prealloc && ops->seq_show)
		goto err_free;
	if (ops->prealloc) {
		int len = of->atomic_write_len ?: PAGE_SIZE;
		of->prealloc_buf = kmalloc(len + 1, GFP_KERNEL);
		error = -ENOMEM;
		if (!of->prealloc_buf)
			goto err_free;
		mutex_init(&of->prealloc_mutex);
	}

	/*
	 * Always instantiate seq_file even if read access doesn't use
	 * seq_file or is not requested.  This unifies private data access
	 * and readable regular files are the vast majority anyway.
	 */
	if (ops->seq_show)
		error = seq_open(file, &kernfs_seq_ops);
	else
		error = seq_open(file, NULL);
	if (error)
		goto err_free;

	of->seq_file = file->private_data;
	of->seq_file->private = of;

	/* seq_file clears PWRITE unconditionally, restore it if WRITE */
	if (file->f_mode & FMODE_WRITE)
		file->f_mode |= FMODE_PWRITE;

	/* make sure we have open node struct */
	error = kernfs_get_open_node(kn, of);
	if (error)
		goto err_seq_release;

	if (ops->open) {
		/* nobody has access to @of yet, skip @of->mutex */
		error = ops->open(of);
		if (error)
			goto err_put_node;
	}

	/* open succeeded, put active references */
	kernfs_put_active(kn);
	return 0;

err_put_node:
	kernfs_unlink_open_file(kn, of, true);
err_seq_release:
	seq_release(inode, file);
err_free:
	kfree(of->prealloc_buf);
	kfree(of);
err_out:
	kernfs_put_active(kn);
	return error;
}

/* used from release/drain to ensure that ->release() is called exactly once */
static void kernfs_release_file(struct kernfs_node *kn,
				struct kernfs_open_file *of)
{
	/*
	 * @of is guaranteed to have no other file operations in flight and
	 * we just want to synchronize release and drain paths.
	 * @kernfs_open_file_mutex_ptr(kn) is enough.  @of->mutex can't be
	 * used here because the drain path may be called from places which
	 * can cause circular dependency.
	 */
	lockdep_assert_held(kernfs_open_file_mutex_ptr(kn));

	if (!of->released) {
		/*
		 * A file is never detached without being released and we
		 * need to be able to release files which are deactivated
		 * and being drained.  Don't use kernfs_ops().
		 */
		kn->attr.ops->release(of);
		of->released = true;
		of_on(of)->nr_to_release--;
	}
}

static int kernfs_fop_release(struct inode *inode, struct file *filp)
{
	struct kernfs_node *kn = inode->i_private;
	struct kernfs_open_file *of = kernfs_of(filp);

	if (kn->flags & KERNFS_HAS_RELEASE) {
		struct mutex *mutex;

		mutex = kernfs_open_file_mutex_lock(kn);
		kernfs_release_file(kn, of);
		mutex_unlock(mutex);
	}

	kernfs_unlink_open_file(kn, of, false);
	seq_release(inode, filp);
	kfree(of->prealloc_buf);
	kfree(of);

	return 0;
}
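
/*
 * Sketch of a user relying on the ->open()/->release() pairing enforced
 * above (hypothetical names; cgroup's kernfs files work along these lines):
 *
 *	static int foo_open(struct kernfs_open_file *of)
 *	{
 *		of->priv = foo_state_alloc(of->kn->priv);	// per-open state
 *		return of->priv ? 0 : -ENOMEM;
 *	}
 *
 *	static void foo_release(struct kernfs_open_file *of)
 *	{
 *		foo_state_free(of->priv);	// runs exactly once, even on drain
 *	}
 *
 *	static const struct kernfs_ops foo_ops = {
 *		.open		= foo_open,
 *		.release	= foo_release,
 *		.seq_show	= foo_seq_show,
 *	};
 */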

bool kernfs_should_drain_open_files(struct kernfs_node *kn)
{
	struct kernfs_open_node *on;
	bool ret;

	/*
	 * @kn being deactivated guarantees that @kn->attr.open can't change
	 * beneath us making the lockless test below safe.
	 */
	WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS);

	rcu_read_lock();
	on = rcu_dereference(kn->attr.open);
	ret = on && (on->nr_mmapped || on->nr_to_release);
	rcu_read_unlock();

	return ret;
}

void kernfs_drain_open_files(struct kernfs_node *kn)
{
	struct kernfs_open_node *on;
	struct kernfs_open_file *of;
	struct mutex *mutex;

	mutex = kernfs_open_file_mutex_lock(kn);
	on = kernfs_deref_open_node_locked(kn);
	if (!on) {
		mutex_unlock(mutex);
		return;
	}

	list_for_each_entry(of, &on->files, list) {
		struct inode *inode = file_inode(of->file);

		if (of->mmapped) {
			unmap_mapping_range(inode->i_mapping, 0, 0, 1);
			of->mmapped = false;
			on->nr_mmapped--;
		}

		if (kn->flags & KERNFS_HAS_RELEASE)
			kernfs_release_file(kn, of);
	}

	WARN_ON_ONCE(on->nr_mmapped || on->nr_to_release);
	mutex_unlock(mutex);
}

/*
 * Kernfs attribute files are pollable.  The idea is that you read
 * the content and then you use 'poll' or 'select' to wait for
 * the content to change.  When the content changes (assuming the
 * manager for the kobject supports notification), poll will
 * return EPOLLERR|EPOLLPRI, and select will return the fd whether
 * it is waiting for read, write, or exceptions.
 * Once poll/select indicates that the value has changed, you
 * need to close and re-open the file, or seek to 0 and read again.
 * Reminder: this only works for attributes which actively support
 * it, and it is not possible to test an attribute from userspace
 * to see if it supports poll (neither 'poll' nor 'select' returns
 * an appropriate error code).  When in doubt, set a suitable timeout value.
 */
__poll_t kernfs_generic_poll(struct kernfs_open_file *of, poll_table *wait)
{
	struct kernfs_open_node *on = of_on(of);

	poll_wait(of->file, &on->poll, wait);

	if (of->event != atomic_read(&on->event))
		return DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI;

	return DEFAULT_POLLMASK;
}

static __poll_t kernfs_fop_poll(struct file *filp, poll_table *wait)
{
	struct kernfs_open_file *of = kernfs_of(filp);
	struct kernfs_node *kn = kernfs_dentry_node(filp->f_path.dentry);
	__poll_t ret;

	if (!kernfs_get_active(kn))
		return DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI;

	if (kn->attr.ops->poll)
		ret = kn->attr.ops->poll(of, wait);
	else
		ret = kernfs_generic_poll(of, wait);

	kernfs_put_active(kn);
	return ret;
}
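
/*
 * Userspace side of the contract described above, as a sketch:
 *
 *	char buf[64];
 *	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *
 *	read(fd, buf, sizeof(buf));		// arm: consume current value
 *	while (poll(&pfd, 1, -1) >= 0) {
 *		if (pfd.revents & (POLLERR | POLLPRI)) {
 *			lseek(fd, 0, SEEK_SET);	// rewind instead of reopen
 *			read(fd, buf, sizeof(buf));
 *		}
 *	}
 *
 * A kernel-side custom ->poll may implement its own readiness logic or
 * fall back to kernfs_generic_poll(), which is also what kernfs_fop_poll()
 * does when no ->poll is provided.
 */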

static loff_t kernfs_fop_llseek(struct file *file, loff_t offset, int whence)
{
	struct kernfs_open_file *of = kernfs_of(file);
	const struct kernfs_ops *ops;
	loff_t ret;

	/*
	 * @of->mutex nests outside active ref and is primarily to ensure that
	 * the ops aren't called concurrently for the same open file.
	 */
	mutex_lock(&of->mutex);
	if (!kernfs_get_active(of->kn)) {
		mutex_unlock(&of->mutex);
		return -ENODEV;
	}

	ops = kernfs_ops(of->kn);
	if (ops->llseek)
		ret = ops->llseek(of, offset, whence);
	else
		ret = generic_file_llseek(file, offset, whence);

	kernfs_put_active(of->kn);
	mutex_unlock(&of->mutex);
	return ret;
}

static void kernfs_notify_workfn(struct work_struct *work)
{
	struct kernfs_node *kn;
	struct kernfs_super_info *info;
	struct kernfs_root *root;
repeat:
	/* pop one off the notify_list */
	spin_lock_irq(&kernfs_notify_lock);
	kn = kernfs_notify_list;
	if (kn == KERNFS_NOTIFY_EOL) {
		spin_unlock_irq(&kernfs_notify_lock);
		return;
	}
	kernfs_notify_list = kn->attr.notify_next;
	kn->attr.notify_next = NULL;
	spin_unlock_irq(&kernfs_notify_lock);

	root = kernfs_root(kn);

	/* kick fsnotify */
	down_read(&root->kernfs_supers_rwsem);
	list_for_each_entry(info, &kernfs_root(kn)->supers, node) {
		struct kernfs_node *parent;
		struct inode *p_inode = NULL;
		struct inode *inode;
		struct qstr name;

		/*
		 * We want fsnotify_modify() on @kn but, as the
		 * modifications aren't originating from userland, we
		 * don't have the matching @file available.  Look up the
		 * inodes and generate the events manually.
		 */
		inode = ilookup(info->sb, kernfs_ino(kn));
		if (!inode)
			continue;

		name = (struct qstr)QSTR_INIT(kn->name, strlen(kn->name));
		parent = kernfs_get_parent(kn);
		if (parent) {
			p_inode = ilookup(info->sb, kernfs_ino(parent));
			if (p_inode) {
				fsnotify(FS_MODIFY | FS_EVENT_ON_CHILD,
					 inode, FSNOTIFY_EVENT_INODE,
					 p_inode, &name, inode, 0);
				iput(p_inode);
			}

			kernfs_put(parent);
		}

		if (!p_inode)
			fsnotify_inode(inode, FS_MODIFY);

		iput(inode);
	}

	up_read(&root->kernfs_supers_rwsem);
	kernfs_put(kn);
	goto repeat;
}

/**
 * kernfs_notify - notify a kernfs file
 * @kn: file to notify
 *
 * Notify @kn such that poll(2) on @kn wakes up.  May be called from any
 * context.
 */
void kernfs_notify(struct kernfs_node *kn)
{
	static DECLARE_WORK(kernfs_notify_work, kernfs_notify_workfn);
	unsigned long flags;
	struct kernfs_open_node *on;

	if (WARN_ON(kernfs_type(kn) != KERNFS_FILE))
		return;

	/* kick poll immediately */
	rcu_read_lock();
	on = rcu_dereference(kn->attr.open);
	if (on) {
		atomic_inc(&on->event);
		wake_up_interruptible(&on->poll);
	}
	rcu_read_unlock();

	/* schedule work to kick fsnotify */
	spin_lock_irqsave(&kernfs_notify_lock, flags);
	if (!kn->attr.notify_next) {
		kernfs_get(kn);
		kn->attr.notify_next = kernfs_notify_list;
		kernfs_notify_list = kn;
		schedule_work(&kernfs_notify_work);
	}
	spin_unlock_irqrestore(&kernfs_notify_lock, flags);
}
EXPORT_SYMBOL_GPL(kernfs_notify);
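
/*
 * Typical caller-side sketch: a driver updates the value backing an
 * attribute and then pokes pollers.  sysfs_notify() and
 * cgroup_file_notify() are among the wrappers that end up here:
 *
 *	spin_lock(&dev->lock);		// hypothetical driver state
 *	dev->status = new_status;
 *	spin_unlock(&dev->lock);
 *	kernfs_notify(dev->status_kn);	// safe from any context
 */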

const struct file_operations kernfs_file_fops = {
	.read_iter	= kernfs_fop_read_iter,
	.write_iter	= kernfs_fop_write_iter,
	.llseek		= kernfs_fop_llseek,
	.mmap		= kernfs_fop_mmap,
	.open		= kernfs_fop_open,
	.release	= kernfs_fop_release,
	.poll		= kernfs_fop_poll,
	.fsync		= noop_fsync,
	.splice_read	= copy_splice_read,
	.splice_write	= iter_file_splice_write,
};

/**
 * __kernfs_create_file - kernfs internal function to create a file
 * @parent: directory to create the file in
 * @name: name of the file
 * @mode: mode of the file
 * @uid: uid of the file
 * @gid: gid of the file
 * @size: size of the file
 * @ops: kernfs operations for the file
 * @priv: private data for the file
 * @ns: optional namespace tag of the file
 * @key: lockdep key for the file's active_ref, %NULL to disable lockdep
 *
 * Return: the created node on success, ERR_PTR() value on error.
 */
struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
					 const char *name,
					 umode_t mode, kuid_t uid, kgid_t gid,
					 loff_t size,
					 const struct kernfs_ops *ops,
					 void *priv, const void *ns,
					 struct lock_class_key *key)
{
	struct kernfs_node *kn;
	unsigned flags;
	int rc;

	flags = KERNFS_FILE;

	kn = kernfs_new_node(parent, name, (mode & S_IALLUGO) | S_IFREG,
			     uid, gid, flags);
	if (!kn)
		return ERR_PTR(-ENOMEM);

	kn->attr.ops = ops;
	kn->attr.size = size;
	kn->ns = ns;
	kn->priv = priv;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (key) {
		lockdep_init_map(&kn->dep_map, "kn->active", key, 0);
		kn->flags |= KERNFS_LOCKDEP;
	}
#endif

	/*
	 * kn->attr.ops is accessible only while holding active ref.  We
	 * need to know whether some ops are implemented outside active
	 * ref.  Cache their existence in flags.
	 */
	if (ops->seq_show)
		kn->flags |= KERNFS_HAS_SEQ_SHOW;
	if (ops->mmap)
		kn->flags |= KERNFS_HAS_MMAP;
	if (ops->release)
		kn->flags |= KERNFS_HAS_RELEASE;

	rc = kernfs_add_one(kn);
	if (rc) {
		kernfs_put(kn);
		return ERR_PTR(rc);
	}
	return kn;
}
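
/*
 * Sketch of tying it all together (hypothetical foo_ops/foo_value; real
 * callers normally go through wrappers such as kernfs_create_file_ns(),
 * which supply the lockdep key):
 *
 *	static int foo_value;
 *
 *	struct kernfs_node *kn;
 *
 *	kn = kernfs_create_file_ns(parent, "value", 0644,
 *				   GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
 *				   PAGE_SIZE, &foo_ops, &foo_value, NULL);
 *	if (IS_ERR(kn))
 *		return PTR_ERR(kn);
 */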
1070