xref: /linux/fs/proc/inode.c (revision b1992c3772e69a6fd0e3fc81cd4d2820c8b6eca0)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  linux/fs/proc/inode.c
4  *
5  *  Copyright (C) 1991, 1992  Linus Torvalds
6  */
7 
8 #include <linux/cache.h>
9 #include <linux/time.h>
10 #include <linux/proc_fs.h>
11 #include <linux/kernel.h>
12 #include <linux/pid_namespace.h>
13 #include <linux/mm.h>
14 #include <linux/string.h>
15 #include <linux/stat.h>
16 #include <linux/completion.h>
17 #include <linux/poll.h>
18 #include <linux/printk.h>
19 #include <linux/file.h>
20 #include <linux/limits.h>
21 #include <linux/init.h>
22 #include <linux/module.h>
23 #include <linux/sysctl.h>
24 #include <linux/seq_file.h>
25 #include <linux/slab.h>
26 #include <linux/mount.h>
27 #include <linux/bug.h>
28 
29 #include "internal.h"
30 
/*
 * ->evict_inode: tear down a proc inode.  Drops the page cache, then
 * severs the links that let pid- and sysctl-tracking code find this
 * inode again.
 */
static void proc_evict_inode(struct inode *inode)
{
	struct ctl_table_header *head;
	struct proc_inode *ei = PROC_I(inode);

	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);

	/* Stop tracking associated processes */
	if (ei->pid)
		proc_pid_evict_inode(ei);

	/*
	 * Detach from the sysctl table before notifying it; the RCU-style
	 * clear of ei->sysctl orders the disconnect for lockless readers.
	 */
	head = ei->sysctl;
	if (head) {
		RCU_INIT_POINTER(ei->sysctl, NULL);
		proc_sys_evict_inode(inode, head);
	}
}
49 
/* Slab caches: one for proc inodes, one for per-opener bookkeeping. */
static struct kmem_cache *proc_inode_cachep __ro_after_init;
static struct kmem_cache *pde_opener_cache __ro_after_init;
52 
53 static struct inode *proc_alloc_inode(struct super_block *sb)
54 {
55 	struct proc_inode *ei;
56 
57 	ei = alloc_inode_sb(sb, proc_inode_cachep, GFP_KERNEL);
58 	if (!ei)
59 		return NULL;
60 	ei->pid = NULL;
61 	ei->fd = 0;
62 	ei->op.proc_get_link = NULL;
63 	ei->pde = NULL;
64 	ei->sysctl = NULL;
65 	ei->sysctl_entry = NULL;
66 	INIT_HLIST_NODE(&ei->sibling_inodes);
67 	ei->ns_ops = NULL;
68 	return &ei->vfs_inode;
69 }
70 
71 static void proc_free_inode(struct inode *inode)
72 {
73 	struct proc_inode *ei = PROC_I(inode);
74 
75 	if (ei->pid)
76 		put_pid(ei->pid);
77 	/* Let go of any associated proc directory entry */
78 	if (ei->pde)
79 		pde_put(ei->pde);
80 	kmem_cache_free(proc_inode_cachep, PROC_I(inode));
81 }
82 
83 static void init_once(void *foo)
84 {
85 	struct proc_inode *ei = (struct proc_inode *) foo;
86 
87 	inode_init_once(&ei->vfs_inode);
88 }
89 
/*
 * Boot-time setup of the procfs slab caches.  SLAB_PANIC means none of
 * these creations can fail silently.
 */
void __init proc_init_kmemcache(void)
{
	proc_inode_cachep = kmem_cache_create("proc_inode_cache",
					     sizeof(struct proc_inode),
					     0, (SLAB_RECLAIM_ACCOUNT|
						SLAB_ACCOUNT|
						SLAB_PANIC),
					     init_once);
	pde_opener_cache =
		kmem_cache_create("pde_opener", sizeof(struct pde_opener), 0,
				  SLAB_ACCOUNT|SLAB_PANIC, NULL);
	/* Only the inline_name tail of a pde may be copied to/from user space. */
	proc_dir_entry_cache = kmem_cache_create_usercopy(
		"proc_dir_entry", SIZEOF_PDE, 0, SLAB_PANIC,
		offsetof(struct proc_dir_entry, inline_name),
		SIZEOF_PDE_INLINE_NAME, NULL);
	/* SIZEOF_PDE must leave room for at least one byte of inline name. */
	BUILD_BUG_ON(sizeof(struct proc_dir_entry) >= SIZEOF_PDE);
}
107 
/*
 * Invalidate the dentries of every proc inode on @inodes (linked through
 * proc_inode.sibling_inodes) so stale entries vanish from the dcache.
 *
 * @inodes: hlist of proc_inode.sibling_inodes entries
 * @lock:   spinlock guarding modifications of that list
 */
void proc_invalidate_siblings_dcache(struct hlist_head *inodes, spinlock_t *lock)
{
	struct hlist_node *node;
	struct super_block *old_sb = NULL;

	rcu_read_lock();
	while ((node = hlist_first_rcu(inodes))) {
		struct proc_inode *ei = hlist_entry(node, struct proc_inode, sibling_inodes);
		struct super_block *sb;
		struct inode *inode;

		/* Unhash under the list lock so each entry is handled once. */
		spin_lock(lock);
		hlist_del_init_rcu(&ei->sibling_inodes);
		spin_unlock(lock);

		inode = &ei->vfs_inode;
		sb = inode->i_sb;
		/*
		 * Pin the superblock unless we already hold a reference
		 * from the previous iteration; skip inodes whose sb is
		 * already being torn down (s_active hit zero).
		 */
		if ((sb != old_sb) && !atomic_inc_not_zero(&sb->s_active))
			continue;
		/* igrab() fails if the inode is already being evicted. */
		inode = igrab(inode);
		rcu_read_unlock();
		/* Release the previous sb only after the new one is pinned. */
		if (sb != old_sb) {
			if (old_sb)
				deactivate_super(old_sb);
			old_sb = sb;
		}
		if (unlikely(!inode)) {
			rcu_read_lock();
			continue;
		}

		if (S_ISDIR(inode->i_mode)) {
			/* Directories have at most one alias. */
			struct dentry *dir = d_find_any_alias(inode);
			if (dir) {
				d_invalidate(dir);
				dput(dir);
			}
		} else {
			struct dentry *dentry;
			while ((dentry = d_find_alias(inode))) {
				d_invalidate(dentry);
				dput(dentry);
			}
		}
		iput(inode);

		rcu_read_lock();
	}
	rcu_read_unlock();
	if (old_sb)
		deactivate_super(old_sb);
}
160 
/* Map a hidepid= mount option value to its user-visible string. */
static inline const char *hidepid2str(enum proc_hidepid v)
{
	switch (v) {
		case HIDEPID_OFF: return "off";
		case HIDEPID_NO_ACCESS: return "noaccess";
		case HIDEPID_INVISIBLE: return "invisible";
		case HIDEPID_NOT_PTRACEABLE: return "ptraceable";
	}
	/*
	 * No default case: the compiler can then warn if a new enum value
	 * is added without a string here.  Falling out means a bad value.
	 */
	WARN_ONCE(1, "bad hide_pid value: %d\n", v);
	return "unknown";
}
172 
173 static int proc_show_options(struct seq_file *seq, struct dentry *root)
174 {
175 	struct proc_fs_info *fs_info = proc_sb_info(root->d_sb);
176 
177 	if (!gid_eq(fs_info->pid_gid, GLOBAL_ROOT_GID))
178 		seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, fs_info->pid_gid));
179 	if (fs_info->hide_pid != HIDEPID_OFF)
180 		seq_printf(seq, ",hidepid=%s", hidepid2str(fs_info->hide_pid));
181 	if (fs_info->pidonly != PROC_PIDONLY_OFF)
182 		seq_printf(seq, ",subset=pid");
183 
184 	return 0;
185 }
186 
/* Superblock operations shared by every procfs mount. */
const struct super_operations proc_sops = {
	.alloc_inode	= proc_alloc_inode,
	.free_inode	= proc_free_inode,
	.drop_inode	= generic_delete_inode,
	.evict_inode	= proc_evict_inode,
	.statfs		= simple_statfs,
	.show_options	= proc_show_options,
};
195 
/*
 * Added to pde->in_use by proc_entry_rundown() to drive the count
 * negative, so atomic_inc_unless_negative() in use_pde() starts failing.
 */
enum {BIAS = -1U<<31};
197 
/* Take a use reference on @pde; fails once rundown has biased ->in_use. */
static inline int use_pde(struct proc_dir_entry *pde)
{
	return likely(atomic_inc_unless_negative(&pde->in_use));
}
202 
/*
 * Drop a use reference on @pde.  The user whose decrement lands exactly
 * on BIAS is the last one out after rundown started, and wakes the
 * remover waiting in proc_entry_rundown().
 */
static void unuse_pde(struct proc_dir_entry *pde)
{
	if (unlikely(atomic_dec_return(&pde->in_use) == BIAS))
		complete(pde->pde_unload_completion);
}
208 
/*
 * At most 2 contexts can enter this function: the one doing the last
 * close on the descriptor and whoever is deleting PDE itself.
 *
 * First to enter calls ->proc_release hook and signals its completion
 * to the second one which waits and then does nothing.
 *
 * PDE is locked on entry, unlocked on exit.
 */
static void close_pdeo(struct proc_dir_entry *pde, struct pde_opener *pdeo)
	__releases(&pde->pde_unload_lock)
{
	/*
	 * close() (proc_reg_release()) can't delete an entry and proceed:
	 * ->release hook needs to be available at the right moment.
	 *
	 * rmmod (remove_proc_entry() et al) can't delete an entry and proceed:
	 * "struct file" needs to be available at the right moment.
	 */
	if (pdeo->closing) {
		/* somebody else is doing that, just wait */
		DECLARE_COMPLETION_ONSTACK(c);
		pdeo->c = &c;
		spin_unlock(&pde->pde_unload_lock);
		wait_for_completion(&c);
	} else {
		struct file *file;
		struct completion *c;

		/* We are first; the other context will see closing and wait. */
		pdeo->closing = true;
		spin_unlock(&pde->pde_unload_lock);

		/* Call ->proc_release without the spinlock held. */
		file = pdeo->file;
		pde->proc_ops->proc_release(file_inode(file), file);

		spin_lock(&pde->pde_unload_lock);
		/* Strictly after ->proc_release, see above. */
		list_del(&pdeo->lh);
		/* Snapshot any waiter before pdeo is freed. */
		c = pdeo->c;
		spin_unlock(&pde->pde_unload_lock);
		if (unlikely(c))
			complete(c);
		kmem_cache_free(pde_opener_cache, pdeo);
	}
}
254 
/*
 * Quiesce @de before it is removed: wait for every in-flight ->proc_ops
 * call to drain, then force-close every file still holding it open.
 */
void proc_entry_rundown(struct proc_dir_entry *de)
{
	DECLARE_COMPLETION_ONSTACK(c);
	/* Wait until all existing callers into module are done. */
	de->pde_unload_completion = &c;
	/* Biasing ->in_use makes every future use_pde() fail. */
	if (atomic_add_return(BIAS, &de->in_use) != BIAS)
		wait_for_completion(&c);

	/* ->pde_openers list can't grow from now on. */

	spin_lock(&de->pde_unload_lock);
	while (!list_empty(&de->pde_openers)) {
		struct pde_opener *pdeo;
		pdeo = list_first_entry(&de->pde_openers, struct pde_opener, lh);
		/* close_pdeo() drops pde_unload_lock; retake it each pass. */
		close_pdeo(de, pdeo);
		spin_lock(&de->pde_unload_lock);
	}
	spin_unlock(&de->pde_unload_lock);
}
274 
275 static loff_t proc_reg_llseek(struct file *file, loff_t offset, int whence)
276 {
277 	struct proc_dir_entry *pde = PDE(file_inode(file));
278 	loff_t rv = -EINVAL;
279 
280 	if (pde_is_permanent(pde)) {
281 		return pde->proc_ops->proc_lseek(file, offset, whence);
282 	} else if (use_pde(pde)) {
283 		rv = pde->proc_ops->proc_lseek(file, offset, whence);
284 		unuse_pde(pde);
285 	}
286 	return rv;
287 }
288 
289 static ssize_t proc_reg_read_iter(struct kiocb *iocb, struct iov_iter *iter)
290 {
291 	struct proc_dir_entry *pde = PDE(file_inode(iocb->ki_filp));
292 	ssize_t ret;
293 
294 	if (pde_is_permanent(pde))
295 		return pde->proc_ops->proc_read_iter(iocb, iter);
296 
297 	if (!use_pde(pde))
298 		return -EIO;
299 	ret = pde->proc_ops->proc_read_iter(iocb, iter);
300 	unuse_pde(pde);
301 	return ret;
302 }
303 
304 static ssize_t pde_read(struct proc_dir_entry *pde, struct file *file, char __user *buf, size_t count, loff_t *ppos)
305 {
306 	typeof_member(struct proc_ops, proc_read) read;
307 
308 	read = pde->proc_ops->proc_read;
309 	if (read)
310 		return read(file, buf, count, ppos);
311 	return -EIO;
312 }
313 
314 static ssize_t proc_reg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
315 {
316 	struct proc_dir_entry *pde = PDE(file_inode(file));
317 	ssize_t rv = -EIO;
318 
319 	if (pde_is_permanent(pde)) {
320 		return pde_read(pde, file, buf, count, ppos);
321 	} else if (use_pde(pde)) {
322 		rv = pde_read(pde, file, buf, count, ppos);
323 		unuse_pde(pde);
324 	}
325 	return rv;
326 }
327 
328 static ssize_t pde_write(struct proc_dir_entry *pde, struct file *file, const char __user *buf, size_t count, loff_t *ppos)
329 {
330 	typeof_member(struct proc_ops, proc_write) write;
331 
332 	write = pde->proc_ops->proc_write;
333 	if (write)
334 		return write(file, buf, count, ppos);
335 	return -EIO;
336 }
337 
338 static ssize_t proc_reg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
339 {
340 	struct proc_dir_entry *pde = PDE(file_inode(file));
341 	ssize_t rv = -EIO;
342 
343 	if (pde_is_permanent(pde)) {
344 		return pde_write(pde, file, buf, count, ppos);
345 	} else if (use_pde(pde)) {
346 		rv = pde_write(pde, file, buf, count, ppos);
347 		unuse_pde(pde);
348 	}
349 	return rv;
350 }
351 
352 static __poll_t pde_poll(struct proc_dir_entry *pde, struct file *file, struct poll_table_struct *pts)
353 {
354 	typeof_member(struct proc_ops, proc_poll) poll;
355 
356 	poll = pde->proc_ops->proc_poll;
357 	if (poll)
358 		return poll(file, pts);
359 	return DEFAULT_POLLMASK;
360 }
361 
362 static __poll_t proc_reg_poll(struct file *file, struct poll_table_struct *pts)
363 {
364 	struct proc_dir_entry *pde = PDE(file_inode(file));
365 	__poll_t rv = DEFAULT_POLLMASK;
366 
367 	if (pde_is_permanent(pde)) {
368 		return pde_poll(pde, file, pts);
369 	} else if (use_pde(pde)) {
370 		rv = pde_poll(pde, file, pts);
371 		unuse_pde(pde);
372 	}
373 	return rv;
374 }
375 
376 static long pde_ioctl(struct proc_dir_entry *pde, struct file *file, unsigned int cmd, unsigned long arg)
377 {
378 	typeof_member(struct proc_ops, proc_ioctl) ioctl;
379 
380 	ioctl = pde->proc_ops->proc_ioctl;
381 	if (ioctl)
382 		return ioctl(file, cmd, arg);
383 	return -ENOTTY;
384 }
385 
386 static long proc_reg_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
387 {
388 	struct proc_dir_entry *pde = PDE(file_inode(file));
389 	long rv = -ENOTTY;
390 
391 	if (pde_is_permanent(pde)) {
392 		return pde_ioctl(pde, file, cmd, arg);
393 	} else if (use_pde(pde)) {
394 		rv = pde_ioctl(pde, file, cmd, arg);
395 		unuse_pde(pde);
396 	}
397 	return rv;
398 }
399 
400 #ifdef CONFIG_COMPAT
401 static long pde_compat_ioctl(struct proc_dir_entry *pde, struct file *file, unsigned int cmd, unsigned long arg)
402 {
403 	typeof_member(struct proc_ops, proc_compat_ioctl) compat_ioctl;
404 
405 	compat_ioctl = pde->proc_ops->proc_compat_ioctl;
406 	if (compat_ioctl)
407 		return compat_ioctl(file, cmd, arg);
408 	return -ENOTTY;
409 }
410 
411 static long proc_reg_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
412 {
413 	struct proc_dir_entry *pde = PDE(file_inode(file));
414 	long rv = -ENOTTY;
415 	if (pde_is_permanent(pde)) {
416 		return pde_compat_ioctl(pde, file, cmd, arg);
417 	} else if (use_pde(pde)) {
418 		rv = pde_compat_ioctl(pde, file, cmd, arg);
419 		unuse_pde(pde);
420 	}
421 	return rv;
422 }
423 #endif
424 
425 static int pde_mmap(struct proc_dir_entry *pde, struct file *file, struct vm_area_struct *vma)
426 {
427 	typeof_member(struct proc_ops, proc_mmap) mmap;
428 
429 	mmap = pde->proc_ops->proc_mmap;
430 	if (mmap)
431 		return mmap(file, vma);
432 	return -EIO;
433 }
434 
435 static int proc_reg_mmap(struct file *file, struct vm_area_struct *vma)
436 {
437 	struct proc_dir_entry *pde = PDE(file_inode(file));
438 	int rv = -EIO;
439 
440 	if (pde_is_permanent(pde)) {
441 		return pde_mmap(pde, file, vma);
442 	} else if (use_pde(pde)) {
443 		rv = pde_mmap(pde, file, vma);
444 		unuse_pde(pde);
445 	}
446 	return rv;
447 }
448 
/*
 * Resolve the get_unmapped_area implementation for @pde: the entry's own
 * hook if set, otherwise (on MMU kernels) the current mm's default.
 * With no implementation at all, hand @orig_addr back unchanged.
 */
static unsigned long
pde_get_unmapped_area(struct proc_dir_entry *pde, struct file *file, unsigned long orig_addr,
			   unsigned long len, unsigned long pgoff,
			   unsigned long flags)
{
	typeof_member(struct proc_ops, proc_get_unmapped_area) get_area;

	get_area = pde->proc_ops->proc_get_unmapped_area;
#ifdef CONFIG_MMU
	if (!get_area)
		get_area = current->mm->get_unmapped_area;
#endif
	if (get_area)
		return get_area(file, orig_addr, len, pgoff, flags);
	return orig_addr;
}
465 
466 static unsigned long
467 proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr,
468 			   unsigned long len, unsigned long pgoff,
469 			   unsigned long flags)
470 {
471 	struct proc_dir_entry *pde = PDE(file_inode(file));
472 	unsigned long rv = -EIO;
473 
474 	if (pde_is_permanent(pde)) {
475 		return pde_get_unmapped_area(pde, file, orig_addr, len, pgoff, flags);
476 	} else if (use_pde(pde)) {
477 		rv = pde_get_unmapped_area(pde, file, orig_addr, len, pgoff, flags);
478 		unuse_pde(pde);
479 	}
480 	return rv;
481 }
482 
/*
 * ->open for all regular proc entries.  For removable entries with a
 * ->proc_release hook, the file is recorded on pde->pde_openers so
 * proc_entry_rundown() can force-close it if the entry is deleted while
 * still open.
 */
static int proc_reg_open(struct inode *inode, struct file *file)
{
	struct proc_dir_entry *pde = PDE(inode);
	int rv = 0;
	typeof_member(struct proc_ops, proc_open) open;
	typeof_member(struct proc_ops, proc_release) release;
	struct pde_opener *pdeo;

	/* Entries without a ->proc_lseek hook are not seekable. */
	if (!pde->proc_ops->proc_lseek)
		file->f_mode &= ~FMODE_LSEEK;

	/* Permanent entries can never be removed: no rundown bookkeeping. */
	if (pde_is_permanent(pde)) {
		open = pde->proc_ops->proc_open;
		if (open)
			rv = open(inode, file);
		return rv;
	}

	/*
	 * Ensure that
	 * 1) PDE's ->release hook will be called no matter what
	 *    either normally by close()/->release, or forcefully by
	 *    rmmod/remove_proc_entry.
	 *
	 * 2) rmmod isn't blocked by opening file in /proc and sitting on
	 *    the descriptor (including "rmmod foo </proc/foo" scenario).
	 *
	 * Save every "struct file" with custom ->release hook.
	 */
	if (!use_pde(pde))
		return -ENOENT;

	release = pde->proc_ops->proc_release;
	if (release) {
		/* Allocate before ->proc_open so failure precedes any side effects. */
		pdeo = kmem_cache_alloc(pde_opener_cache, GFP_KERNEL);
		if (!pdeo) {
			rv = -ENOMEM;
			goto out_unuse;
		}
	}

	open = pde->proc_ops->proc_open;
	if (open)
		rv = open(inode, file);

	if (release) {
		if (rv == 0) {
			/* To know what to release. */
			pdeo->file = file;
			pdeo->closing = false;
			pdeo->c = NULL;
			spin_lock(&pde->pde_unload_lock);
			list_add(&pdeo->lh, &pde->pde_openers);
			spin_unlock(&pde->pde_unload_lock);
		} else
			kmem_cache_free(pde_opener_cache, pdeo);
	}

out_unuse:
	unuse_pde(pde);
	return rv;
}
545 
/*
 * ->release for regular proc entries: for removable entries, find this
 * file's pde_opener and run the shared close path.
 */
static int proc_reg_release(struct inode *inode, struct file *file)
{
	struct proc_dir_entry *pde = PDE(inode);
	struct pde_opener *pdeo;

	/* Permanent entries keep no opener list; call the hook directly. */
	if (pde_is_permanent(pde)) {
		typeof_member(struct proc_ops, proc_release) release;

		release = pde->proc_ops->proc_release;
		if (release) {
			return release(inode, file);
		}
		return 0;
	}

	/* close_pdeo() drops pde_unload_lock on the found path. */
	spin_lock(&pde->pde_unload_lock);
	list_for_each_entry(pdeo, &pde->pde_openers, lh) {
		if (pdeo->file == file) {
			close_pdeo(pde, pdeo);
			return 0;
		}
	}
	spin_unlock(&pde->pde_unload_lock);
	return 0;
}
571 
/* Default file operations for proc entries using ->proc_read/->proc_write. */
static const struct file_operations proc_reg_file_ops = {
	.llseek		= proc_reg_llseek,
	.read		= proc_reg_read,
	.write		= proc_reg_write,
	.poll		= proc_reg_poll,
	.unlocked_ioctl	= proc_reg_unlocked_ioctl,
	.mmap		= proc_reg_mmap,
	.get_unmapped_area = proc_reg_get_unmapped_area,
	.open		= proc_reg_open,
	.release	= proc_reg_release,
};
583 
/* Variant used when the entry implements ->proc_read_iter. */
static const struct file_operations proc_iter_file_ops = {
	.llseek		= proc_reg_llseek,
	.read_iter	= proc_reg_read_iter,
	.write		= proc_reg_write,
	.splice_read	= copy_splice_read,
	.poll		= proc_reg_poll,
	.unlocked_ioctl	= proc_reg_unlocked_ioctl,
	.mmap		= proc_reg_mmap,
	.get_unmapped_area = proc_reg_get_unmapped_area,
	.open		= proc_reg_open,
	.release	= proc_reg_release,
};
596 
597 #ifdef CONFIG_COMPAT
/* As proc_reg_file_ops, plus ->compat_ioctl for 32-bit callers. */
static const struct file_operations proc_reg_file_ops_compat = {
	.llseek		= proc_reg_llseek,
	.read		= proc_reg_read,
	.write		= proc_reg_write,
	.poll		= proc_reg_poll,
	.unlocked_ioctl	= proc_reg_unlocked_ioctl,
	.compat_ioctl	= proc_reg_compat_ioctl,
	.mmap		= proc_reg_mmap,
	.get_unmapped_area = proc_reg_get_unmapped_area,
	.open		= proc_reg_open,
	.release	= proc_reg_release,
};
610 
/* As proc_iter_file_ops, plus ->compat_ioctl for 32-bit callers. */
static const struct file_operations proc_iter_file_ops_compat = {
	.llseek		= proc_reg_llseek,
	.read_iter	= proc_reg_read_iter,
	.splice_read	= copy_splice_read,
	.write		= proc_reg_write,
	.poll		= proc_reg_poll,
	.unlocked_ioctl	= proc_reg_unlocked_ioctl,
	.compat_ioctl	= proc_reg_compat_ioctl,
	.mmap		= proc_reg_mmap,
	.get_unmapped_area = proc_reg_get_unmapped_area,
	.open		= proc_reg_open,
	.release	= proc_reg_release,
};
624 #endif
625 
/* delayed_call callback for proc_get_link(): drop the pde use reference. */
static void proc_put_link(void *p)
{
	unuse_pde(p);
}
630 
/*
 * ->get_link for proc symlinks whose target string lives in pde->data.
 * Pins the pde for the duration of the walk; proc_put_link() releases
 * it via the delayed call.
 */
static const char *proc_get_link(struct dentry *dentry,
				 struct inode *inode,
				 struct delayed_call *done)
{
	struct proc_dir_entry *pde = PDE(inode);
	if (!use_pde(pde))
		return ERR_PTR(-EINVAL);
	set_delayed_call(done, proc_put_link, pde);
	return pde->data;
}
641 
/* Inode operations for proc symlinks (see proc_get_link()). */
const struct inode_operations proc_link_inode_operations = {
	.get_link	= proc_get_link,
};
645 
/*
 * Instantiate a fresh inode for proc dir entry @de on superblock @sb.
 *
 * Consumes the caller's reference on @de: on allocation failure it is
 * dropped here; on success it is owned by the inode (released from
 * proc_free_inode() through ->pde).  Returns NULL on failure.
 */
struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
{
	struct inode *inode = new_inode(sb);

	if (!inode) {
		pde_put(de);
		return NULL;
	}

	inode->i_private = de->data;
	inode->i_ino = de->low_ino;
	simple_inode_init_ts(inode);
	PROC_I(inode)->pde = de;
	/* "Empty" pdes present as empty directories (see is_empty_pde()). */
	if (is_empty_pde(de)) {
		make_empty_dir_inode(inode);
		return inode;
	}

	if (de->mode) {
		inode->i_mode = de->mode;
		inode->i_uid = de->uid;
		inode->i_gid = de->gid;
	}
	if (de->size)
		inode->i_size = de->size;
	if (de->nlink)
		set_nlink(inode, de->nlink);

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = de->proc_iops;
		/* Prefer the iter-based fops when the entry has ->proc_read_iter. */
		if (de->proc_ops->proc_read_iter)
			inode->i_fop = &proc_iter_file_ops;
		else
			inode->i_fop = &proc_reg_file_ops;
#ifdef CONFIG_COMPAT
		/* Swap in the compat-ioctl-aware variants when needed. */
		if (de->proc_ops->proc_compat_ioctl) {
			if (de->proc_ops->proc_read_iter)
				inode->i_fop = &proc_iter_file_ops_compat;
			else
				inode->i_fop = &proc_reg_file_ops_compat;
		}
#endif
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = de->proc_iops;
		inode->i_fop = de->proc_dir_ops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = de->proc_iops;
		inode->i_fop = NULL;
	} else {
		/* Only regular files, directories and symlinks exist in proc. */
		BUG();
	}
	return inode;
}
699