xref: /linux/fs/proc/inode.c (revision 4b132aacb0768ac1e652cf517097ea6f237214b9)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  linux/fs/proc/inode.c
4  *
5  *  Copyright (C) 1991, 1992  Linus Torvalds
6  */
7 
8 #include <linux/cache.h>
9 #include <linux/time.h>
10 #include <linux/proc_fs.h>
11 #include <linux/kernel.h>
12 #include <linux/pid_namespace.h>
13 #include <linux/mm.h>
14 #include <linux/string.h>
15 #include <linux/stat.h>
16 #include <linux/completion.h>
17 #include <linux/poll.h>
18 #include <linux/printk.h>
19 #include <linux/file.h>
20 #include <linux/limits.h>
21 #include <linux/init.h>
22 #include <linux/module.h>
23 #include <linux/sysctl.h>
24 #include <linux/seq_file.h>
25 #include <linux/slab.h>
26 #include <linux/mount.h>
27 #include <linux/bug.h>
28 
29 #include "internal.h"
30 
/*
 * Tear down a proc inode: flush its page cache, then detach the
 * proc-specific state (pid tracking, sysctl binding) that was attached
 * while the inode was live.
 */
static void proc_evict_inode(struct inode *inode)
{
	struct ctl_table_header *head;
	struct proc_inode *ei = PROC_I(inode);

	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);

	/* Stop tracking associated processes */
	if (ei->pid)
		proc_pid_evict_inode(ei);

	/* Detach the sysctl header; RCU readers of ei->sysctl see NULL. */
	head = ei->sysctl;
	if (head) {
		RCU_INIT_POINTER(ei->sysctl, NULL);
		proc_sys_evict_inode(inode, head);
	}
}
49 
/* Slab caches: proc inodes and the pde_opener nodes tracking open files. */
static struct kmem_cache *proc_inode_cachep __ro_after_init;
static struct kmem_cache *pde_opener_cache __ro_after_init;
52 
53 static struct inode *proc_alloc_inode(struct super_block *sb)
54 {
55 	struct proc_inode *ei;
56 
57 	ei = alloc_inode_sb(sb, proc_inode_cachep, GFP_KERNEL);
58 	if (!ei)
59 		return NULL;
60 	ei->pid = NULL;
61 	ei->fd = 0;
62 	ei->op.proc_get_link = NULL;
63 	ei->pde = NULL;
64 	ei->sysctl = NULL;
65 	ei->sysctl_entry = NULL;
66 	INIT_HLIST_NODE(&ei->sibling_inodes);
67 	ei->ns_ops = NULL;
68 	return &ei->vfs_inode;
69 }
70 
71 static void proc_free_inode(struct inode *inode)
72 {
73 	struct proc_inode *ei = PROC_I(inode);
74 
75 	if (ei->pid)
76 		put_pid(ei->pid);
77 	/* Let go of any associated proc directory entry */
78 	if (ei->pde)
79 		pde_put(ei->pde);
80 	kmem_cache_free(proc_inode_cachep, PROC_I(inode));
81 }
82 
83 static void init_once(void *foo)
84 {
85 	struct proc_inode *ei = (struct proc_inode *) foo;
86 
87 	inode_init_once(&ei->vfs_inode);
88 }
89 
90 void __init proc_init_kmemcache(void)
91 {
92 	proc_inode_cachep = kmem_cache_create("proc_inode_cache",
93 					     sizeof(struct proc_inode),
94 					     0, (SLAB_RECLAIM_ACCOUNT|
95 						SLAB_ACCOUNT|
96 						SLAB_PANIC),
97 					     init_once);
98 	pde_opener_cache =
99 		kmem_cache_create("pde_opener", sizeof(struct pde_opener), 0,
100 				  SLAB_ACCOUNT|SLAB_PANIC, NULL);
101 	proc_dir_entry_cache = kmem_cache_create_usercopy(
102 		"proc_dir_entry", SIZEOF_PDE, 0, SLAB_PANIC,
103 		offsetof(struct proc_dir_entry, inline_name),
104 		SIZEOF_PDE_INLINE_NAME, NULL);
105 	BUILD_BUG_ON(sizeof(struct proc_dir_entry) >= SIZEOF_PDE);
106 }
107 
/*
 * Invalidate the dentries of every inode currently linked on @inodes (a
 * list threaded through proc_inode.sibling_inodes, protected by @lock),
 * so stale entries vanish from the dcache.
 * NOTE(review): callers appear to use this when the backing object goes
 * away — confirm against the sibling_inodes users elsewhere in fs/proc.
 */
void proc_invalidate_siblings_dcache(struct hlist_head *inodes, spinlock_t *lock)
{
	struct hlist_node *node;
	struct super_block *old_sb = NULL;

	rcu_read_lock();
	while ((node = hlist_first_rcu(inodes))) {
		struct proc_inode *ei = hlist_entry(node, struct proc_inode, sibling_inodes);
		struct super_block *sb;
		struct inode *inode;

		/* Unhash under @lock so each inode is handled exactly once. */
		spin_lock(lock);
		hlist_del_init_rcu(&ei->sibling_inodes);
		spin_unlock(lock);

		inode = &ei->vfs_inode;
		sb = inode->i_sb;
		/* Pin the superblock (unless we already hold it); skip dying sbs. */
		if ((sb != old_sb) && !atomic_inc_not_zero(&sb->s_active))
			continue;
		inode = igrab(inode);
		rcu_read_unlock();
		/* Swap the cached sb reference outside the RCU section. */
		if (sb != old_sb) {
			if (old_sb)
				deactivate_super(old_sb);
			old_sb = sb;
		}
		/* igrab() failed: inode already on its way out, nothing to do. */
		if (unlikely(!inode)) {
			rcu_read_lock();
			continue;
		}

		if (S_ISDIR(inode->i_mode)) {
			/* A directory inode has at most one alias. */
			struct dentry *dir = d_find_any_alias(inode);
			if (dir) {
				d_invalidate(dir);
				dput(dir);
			}
		} else {
			struct dentry *dentry;
			while ((dentry = d_find_alias(inode))) {
				d_invalidate(dentry);
				dput(dentry);
			}
		}
		iput(inode);

		rcu_read_lock();
	}
	rcu_read_unlock();
	if (old_sb)
		deactivate_super(old_sb);
}
160 
161 static inline const char *hidepid2str(enum proc_hidepid v)
162 {
163 	switch (v) {
164 		case HIDEPID_OFF: return "off";
165 		case HIDEPID_NO_ACCESS: return "noaccess";
166 		case HIDEPID_INVISIBLE: return "invisible";
167 		case HIDEPID_NOT_PTRACEABLE: return "ptraceable";
168 	}
169 	WARN_ONCE(1, "bad hide_pid value: %d\n", v);
170 	return "unknown";
171 }
172 
173 static int proc_show_options(struct seq_file *seq, struct dentry *root)
174 {
175 	struct proc_fs_info *fs_info = proc_sb_info(root->d_sb);
176 
177 	if (!gid_eq(fs_info->pid_gid, GLOBAL_ROOT_GID))
178 		seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, fs_info->pid_gid));
179 	if (fs_info->hide_pid != HIDEPID_OFF)
180 		seq_printf(seq, ",hidepid=%s", hidepid2str(fs_info->hide_pid));
181 	if (fs_info->pidonly != PROC_PIDONLY_OFF)
182 		seq_printf(seq, ",subset=pid");
183 
184 	return 0;
185 }
186 
/* Superblock operations shared by all procfs mounts. */
const struct super_operations proc_sops = {
	.alloc_inode	= proc_alloc_inode,
	.free_inode	= proc_free_inode,
	.drop_inode	= generic_delete_inode,
	.evict_inode	= proc_evict_inode,
	.statfs		= simple_statfs,
	.show_options	= proc_show_options,
};
195 
/*
 * Added to pde->in_use by proc_entry_rundown() to drive it negative, so
 * use_pde() (atomic_inc_unless_negative) fails for all new callers.
 */
enum {BIAS = -1U<<31};
197 
/* Take a usage ref on @pde; fails once rundown has biased ->in_use negative. */
static inline int use_pde(struct proc_dir_entry *pde)
{
	return likely(atomic_inc_unless_negative(&pde->in_use));
}
202 
/*
 * Drop a usage ref.  If we were the last user while rundown is pending
 * (->in_use falls back to BIAS), wake the waiter in proc_entry_rundown().
 */
static void unuse_pde(struct proc_dir_entry *pde)
{
	if (unlikely(atomic_dec_return(&pde->in_use) == BIAS))
		complete(pde->pde_unload_completion);
}
208 
/*
 * At most 2 contexts can enter this function: the one doing the last
 * close on the descriptor and whoever is deleting PDE itself.
 *
 * First to enter calls ->proc_release hook and signals its completion
 * to the second one which waits and then does nothing.
 *
 * PDE is locked on entry, unlocked on exit.
 */
static void close_pdeo(struct proc_dir_entry *pde, struct pde_opener *pdeo)
	__releases(&pde->pde_unload_lock)
{
	/*
	 * close() (proc_reg_release()) can't delete an entry and proceed:
	 * ->release hook needs to be available at the right moment.
	 *
	 * rmmod (remove_proc_entry() et al) can't delete an entry and proceed:
	 * "struct file" needs to be available at the right moment.
	 */
	if (pdeo->closing) {
		/* somebody else is doing that, just wait */
		DECLARE_COMPLETION_ONSTACK(c);
		/* Publish our completion under the lock; the closer fires it. */
		pdeo->c = &c;
		spin_unlock(&pde->pde_unload_lock);
		wait_for_completion(&c);
	} else {
		struct file *file;
		struct completion *c;

		/* Claim the close; the lock makes this claim race-free. */
		pdeo->closing = true;
		spin_unlock(&pde->pde_unload_lock);

		file = pdeo->file;
		pde->proc_ops->proc_release(file_inode(file), file);

		spin_lock(&pde->pde_unload_lock);
		/* Strictly after ->proc_release, see above. */
		list_del(&pdeo->lh);
		/* Snapshot any registered waiter before pdeo is freed. */
		c = pdeo->c;
		spin_unlock(&pde->pde_unload_lock);
		if (unlikely(c))
			complete(c);
		kmem_cache_free(pde_opener_cache, pdeo);
	}
}
254 
/*
 * Shut down a PDE prior to removal: wait for in-flight users to drain,
 * then force ->proc_release on every file still holding it open.
 */
void proc_entry_rundown(struct proc_dir_entry *de)
{
	DECLARE_COMPLETION_ONSTACK(c);
	/* Wait until all existing callers into module are done. */
	de->pde_unload_completion = &c;
	/* Biasing ->in_use negative makes every further use_pde() fail. */
	if (atomic_add_return(BIAS, &de->in_use) != BIAS)
		wait_for_completion(&c);

	/* ->pde_openers list can't grow from now on. */

	spin_lock(&de->pde_unload_lock);
	while (!list_empty(&de->pde_openers)) {
		struct pde_opener *pdeo;
		pdeo = list_first_entry(&de->pde_openers, struct pde_opener, lh);
		/* close_pdeo() drops the lock; retake it for the next pass. */
		close_pdeo(de, pdeo);
		spin_lock(&de->pde_unload_lock);
	}
	spin_unlock(&de->pde_unload_lock);
}
274 
275 static loff_t proc_reg_llseek(struct file *file, loff_t offset, int whence)
276 {
277 	struct proc_dir_entry *pde = PDE(file_inode(file));
278 	loff_t rv = -EINVAL;
279 
280 	if (pde_is_permanent(pde)) {
281 		return pde->proc_ops->proc_lseek(file, offset, whence);
282 	} else if (use_pde(pde)) {
283 		rv = pde->proc_ops->proc_lseek(file, offset, whence);
284 		unuse_pde(pde);
285 	}
286 	return rv;
287 }
288 
289 static ssize_t proc_reg_read_iter(struct kiocb *iocb, struct iov_iter *iter)
290 {
291 	struct proc_dir_entry *pde = PDE(file_inode(iocb->ki_filp));
292 	ssize_t ret;
293 
294 	if (pde_is_permanent(pde))
295 		return pde->proc_ops->proc_read_iter(iocb, iter);
296 
297 	if (!use_pde(pde))
298 		return -EIO;
299 	ret = pde->proc_ops->proc_read_iter(iocb, iter);
300 	unuse_pde(pde);
301 	return ret;
302 }
303 
304 static ssize_t pde_read(struct proc_dir_entry *pde, struct file *file, char __user *buf, size_t count, loff_t *ppos)
305 {
306 	typeof_member(struct proc_ops, proc_read) read;
307 
308 	read = pde->proc_ops->proc_read;
309 	if (read)
310 		return read(file, buf, count, ppos);
311 	return -EIO;
312 }
313 
314 static ssize_t proc_reg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
315 {
316 	struct proc_dir_entry *pde = PDE(file_inode(file));
317 	ssize_t rv = -EIO;
318 
319 	if (pde_is_permanent(pde)) {
320 		return pde_read(pde, file, buf, count, ppos);
321 	} else if (use_pde(pde)) {
322 		rv = pde_read(pde, file, buf, count, ppos);
323 		unuse_pde(pde);
324 	}
325 	return rv;
326 }
327 
328 static ssize_t pde_write(struct proc_dir_entry *pde, struct file *file, const char __user *buf, size_t count, loff_t *ppos)
329 {
330 	typeof_member(struct proc_ops, proc_write) write;
331 
332 	write = pde->proc_ops->proc_write;
333 	if (write)
334 		return write(file, buf, count, ppos);
335 	return -EIO;
336 }
337 
338 static ssize_t proc_reg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
339 {
340 	struct proc_dir_entry *pde = PDE(file_inode(file));
341 	ssize_t rv = -EIO;
342 
343 	if (pde_is_permanent(pde)) {
344 		return pde_write(pde, file, buf, count, ppos);
345 	} else if (use_pde(pde)) {
346 		rv = pde_write(pde, file, buf, count, ppos);
347 		unuse_pde(pde);
348 	}
349 	return rv;
350 }
351 
352 static __poll_t pde_poll(struct proc_dir_entry *pde, struct file *file, struct poll_table_struct *pts)
353 {
354 	typeof_member(struct proc_ops, proc_poll) poll;
355 
356 	poll = pde->proc_ops->proc_poll;
357 	if (poll)
358 		return poll(file, pts);
359 	return DEFAULT_POLLMASK;
360 }
361 
362 static __poll_t proc_reg_poll(struct file *file, struct poll_table_struct *pts)
363 {
364 	struct proc_dir_entry *pde = PDE(file_inode(file));
365 	__poll_t rv = DEFAULT_POLLMASK;
366 
367 	if (pde_is_permanent(pde)) {
368 		return pde_poll(pde, file, pts);
369 	} else if (use_pde(pde)) {
370 		rv = pde_poll(pde, file, pts);
371 		unuse_pde(pde);
372 	}
373 	return rv;
374 }
375 
376 static long pde_ioctl(struct proc_dir_entry *pde, struct file *file, unsigned int cmd, unsigned long arg)
377 {
378 	typeof_member(struct proc_ops, proc_ioctl) ioctl;
379 
380 	ioctl = pde->proc_ops->proc_ioctl;
381 	if (ioctl)
382 		return ioctl(file, cmd, arg);
383 	return -ENOTTY;
384 }
385 
386 static long proc_reg_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
387 {
388 	struct proc_dir_entry *pde = PDE(file_inode(file));
389 	long rv = -ENOTTY;
390 
391 	if (pde_is_permanent(pde)) {
392 		return pde_ioctl(pde, file, cmd, arg);
393 	} else if (use_pde(pde)) {
394 		rv = pde_ioctl(pde, file, cmd, arg);
395 		unuse_pde(pde);
396 	}
397 	return rv;
398 }
399 
400 #ifdef CONFIG_COMPAT
401 static long pde_compat_ioctl(struct proc_dir_entry *pde, struct file *file, unsigned int cmd, unsigned long arg)
402 {
403 	typeof_member(struct proc_ops, proc_compat_ioctl) compat_ioctl;
404 
405 	compat_ioctl = pde->proc_ops->proc_compat_ioctl;
406 	if (compat_ioctl)
407 		return compat_ioctl(file, cmd, arg);
408 	return -ENOTTY;
409 }
410 
411 static long proc_reg_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
412 {
413 	struct proc_dir_entry *pde = PDE(file_inode(file));
414 	long rv = -ENOTTY;
415 	if (pde_is_permanent(pde)) {
416 		return pde_compat_ioctl(pde, file, cmd, arg);
417 	} else if (use_pde(pde)) {
418 		rv = pde_compat_ioctl(pde, file, cmd, arg);
419 		unuse_pde(pde);
420 	}
421 	return rv;
422 }
423 #endif
424 
425 static int pde_mmap(struct proc_dir_entry *pde, struct file *file, struct vm_area_struct *vma)
426 {
427 	typeof_member(struct proc_ops, proc_mmap) mmap;
428 
429 	mmap = pde->proc_ops->proc_mmap;
430 	if (mmap)
431 		return mmap(file, vma);
432 	return -EIO;
433 }
434 
435 static int proc_reg_mmap(struct file *file, struct vm_area_struct *vma)
436 {
437 	struct proc_dir_entry *pde = PDE(file_inode(file));
438 	int rv = -EIO;
439 
440 	if (pde_is_permanent(pde)) {
441 		return pde_mmap(pde, file, vma);
442 	} else if (use_pde(pde)) {
443 		rv = pde_mmap(pde, file, vma);
444 		unuse_pde(pde);
445 	}
446 	return rv;
447 }
448 
449 static unsigned long
450 pde_get_unmapped_area(struct proc_dir_entry *pde, struct file *file, unsigned long orig_addr,
451 			   unsigned long len, unsigned long pgoff,
452 			   unsigned long flags)
453 {
454 	if (pde->proc_ops->proc_get_unmapped_area)
455 		return pde->proc_ops->proc_get_unmapped_area(file, orig_addr, len, pgoff, flags);
456 
457 #ifdef CONFIG_MMU
458 	return mm_get_unmapped_area(current->mm, file, orig_addr, len, pgoff, flags);
459 #endif
460 
461 	return orig_addr;
462 }
463 
464 static unsigned long
465 proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr,
466 			   unsigned long len, unsigned long pgoff,
467 			   unsigned long flags)
468 {
469 	struct proc_dir_entry *pde = PDE(file_inode(file));
470 	unsigned long rv = -EIO;
471 
472 	if (pde_is_permanent(pde)) {
473 		return pde_get_unmapped_area(pde, file, orig_addr, len, pgoff, flags);
474 	} else if (use_pde(pde)) {
475 		rv = pde_get_unmapped_area(pde, file, orig_addr, len, pgoff, flags);
476 		unuse_pde(pde);
477 	}
478 	return rv;
479 }
480 
/*
 * Open hook for PDE-backed files.  For non-permanent entries, pins the
 * PDE across ->proc_open and registers a pde_opener when the entry has
 * a ->proc_release hook, so the release can be forced at
 * remove_proc_entry() time.
 */
static int proc_reg_open(struct inode *inode, struct file *file)
{
	struct proc_dir_entry *pde = PDE(inode);
	int rv = 0;
	typeof_member(struct proc_ops, proc_open) open;
	typeof_member(struct proc_ops, proc_release) release;
	struct pde_opener *pdeo;

	/* Entries without a ->proc_lseek hook are not seekable. */
	if (!pde->proc_ops->proc_lseek)
		file->f_mode &= ~FMODE_LSEEK;

	/* Permanent entries never go away: no pinning or tracking needed. */
	if (pde_is_permanent(pde)) {
		open = pde->proc_ops->proc_open;
		if (open)
			rv = open(inode, file);
		return rv;
	}

	/*
	 * Ensure that
	 * 1) PDE's ->release hook will be called no matter what
	 *    either normally by close()/->release, or forcefully by
	 *    rmmod/remove_proc_entry.
	 *
	 * 2) rmmod isn't blocked by opening file in /proc and sitting on
	 *    the descriptor (including "rmmod foo </proc/foo" scenario).
	 *
	 * Save every "struct file" with custom ->release hook.
	 */
	if (!use_pde(pde))
		return -ENOENT;

	/* Allocate the tracking node before ->proc_open so failure is cheap. */
	release = pde->proc_ops->proc_release;
	if (release) {
		pdeo = kmem_cache_alloc(pde_opener_cache, GFP_KERNEL);
		if (!pdeo) {
			rv = -ENOMEM;
			goto out_unuse;
		}
	}

	open = pde->proc_ops->proc_open;
	if (open)
		rv = open(inode, file);

	if (release) {
		if (rv == 0) {
			/* To know what to release. */
			pdeo->file = file;
			pdeo->closing = false;
			pdeo->c = NULL;
			spin_lock(&pde->pde_unload_lock);
			list_add(&pdeo->lh, &pde->pde_openers);
			spin_unlock(&pde->pde_unload_lock);
		} else
			kmem_cache_free(pde_opener_cache, pdeo);
	}

out_unuse:
	unuse_pde(pde);
	return rv;
}
543 
/*
 * Release hook: find this file's pde_opener and run the real
 * ->proc_release via close_pdeo().  close_pdeo() drops
 * pde_unload_lock itself, hence the bare return inside the loop.
 */
static int proc_reg_release(struct inode *inode, struct file *file)
{
	struct proc_dir_entry *pde = PDE(inode);
	struct pde_opener *pdeo;

	/* Permanent entries keep no opener list; call ->proc_release directly. */
	if (pde_is_permanent(pde)) {
		typeof_member(struct proc_ops, proc_release) release;

		release = pde->proc_ops->proc_release;
		if (release) {
			return release(inode, file);
		}
		return 0;
	}

	spin_lock(&pde->pde_unload_lock);
	list_for_each_entry(pdeo, &pde->pde_openers, lh) {
		if (pdeo->file == file) {
			/* close_pdeo() releases pde_unload_lock. */
			close_pdeo(pde, pdeo);
			return 0;
		}
	}
	spin_unlock(&pde->pde_unload_lock);
	return 0;
}
569 
/* File operations for PDE-backed files implementing ->proc_read. */
static const struct file_operations proc_reg_file_ops = {
	.llseek		= proc_reg_llseek,
	.read		= proc_reg_read,
	.write		= proc_reg_write,
	.poll		= proc_reg_poll,
	.unlocked_ioctl	= proc_reg_unlocked_ioctl,
	.mmap		= proc_reg_mmap,
	.get_unmapped_area = proc_reg_get_unmapped_area,
	.open		= proc_reg_open,
	.release	= proc_reg_release,
};
581 
/* File operations for PDE-backed files implementing ->proc_read_iter. */
static const struct file_operations proc_iter_file_ops = {
	.llseek		= proc_reg_llseek,
	.read_iter	= proc_reg_read_iter,
	.write		= proc_reg_write,
	.splice_read	= copy_splice_read,
	.poll		= proc_reg_poll,
	.unlocked_ioctl	= proc_reg_unlocked_ioctl,
	.mmap		= proc_reg_mmap,
	.get_unmapped_area = proc_reg_get_unmapped_area,
	.open		= proc_reg_open,
	.release	= proc_reg_release,
};
594 
595 #ifdef CONFIG_COMPAT
/* As proc_reg_file_ops, with ->proc_compat_ioctl wired up. */
static const struct file_operations proc_reg_file_ops_compat = {
	.llseek		= proc_reg_llseek,
	.read		= proc_reg_read,
	.write		= proc_reg_write,
	.poll		= proc_reg_poll,
	.unlocked_ioctl	= proc_reg_unlocked_ioctl,
	.compat_ioctl	= proc_reg_compat_ioctl,
	.mmap		= proc_reg_mmap,
	.get_unmapped_area = proc_reg_get_unmapped_area,
	.open		= proc_reg_open,
	.release	= proc_reg_release,
};
608 
/* As proc_iter_file_ops, with ->proc_compat_ioctl wired up. */
static const struct file_operations proc_iter_file_ops_compat = {
	.llseek		= proc_reg_llseek,
	.read_iter	= proc_reg_read_iter,
	.splice_read	= copy_splice_read,
	.write		= proc_reg_write,
	.poll		= proc_reg_poll,
	.unlocked_ioctl	= proc_reg_unlocked_ioctl,
	.compat_ioctl	= proc_reg_compat_ioctl,
	.mmap		= proc_reg_mmap,
	.get_unmapped_area = proc_reg_get_unmapped_area,
	.open		= proc_reg_open,
	.release	= proc_reg_release,
};
622 #endif
623 
/* Delayed-call destructor for proc_get_link(): drop the PDE usage ref. */
static void proc_put_link(void *p)
{
	unuse_pde(p);
}
628 
/*
 * ->get_link for PDE-backed symlinks: the target string lives in
 * pde->data, so pin the PDE while the caller uses it; proc_put_link()
 * drops the pin via the delayed call.
 */
static const char *proc_get_link(struct dentry *dentry,
				 struct inode *inode,
				 struct delayed_call *done)
{
	struct proc_dir_entry *pde = PDE(inode);
	if (!use_pde(pde))
		return ERR_PTR(-EINVAL);
	set_delayed_call(done, proc_put_link, pde);
	return pde->data;
}
639 
/* Inode operations for PDE-backed symlinks. */
const struct inode_operations proc_link_inode_operations = {
	.get_link	= proc_get_link,
};
643 
/*
 * Build a VFS inode for @de on superblock @sb.  Consumes the caller's
 * PDE reference: on allocation failure it is dropped here and NULL is
 * returned; on success it is owned by the inode and released via
 * proc_free_inode().
 */
struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
{
	struct inode *inode = new_inode(sb);

	if (!inode) {
		pde_put(de);
		return NULL;
	}

	inode->i_private = de->data;
	inode->i_ino = de->low_ino;
	simple_inode_init_ts(inode);
	PROC_I(inode)->pde = de;
	/* Entries flagged empty become an empty directory inode. */
	if (is_empty_pde(de)) {
		make_empty_dir_inode(inode);
		return inode;
	}

	/* Copy mode/ownership/size/nlink from the PDE where set. */
	if (de->mode) {
		inode->i_mode = de->mode;
		inode->i_uid = de->uid;
		inode->i_gid = de->gid;
	}
	if (de->size)
		inode->i_size = de->size;
	if (de->nlink)
		set_nlink(inode, de->nlink);

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = de->proc_iops;
		/* Prefer the iter-based fops when the entry provides read_iter. */
		if (de->proc_ops->proc_read_iter)
			inode->i_fop = &proc_iter_file_ops;
		else
			inode->i_fop = &proc_reg_file_ops;
#ifdef CONFIG_COMPAT
		if (de->proc_ops->proc_compat_ioctl) {
			if (de->proc_ops->proc_read_iter)
				inode->i_fop = &proc_iter_file_ops_compat;
			else
				inode->i_fop = &proc_reg_file_ops_compat;
		}
#endif
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = de->proc_iops;
		inode->i_fop = de->proc_dir_ops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = de->proc_iops;
		inode->i_fop = NULL;
	} else {
		BUG();
	}
	return inode;
}
697