xref: /linux/fs/proc/internal.h (revision 1a80ff0f8896750156f22dbf2d4591d79bb2a155)
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Internal procfs definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/proc_fs.h>
#include <linux/proc_ns.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/binfmts.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/mm.h>

struct ctl_table_header;
struct mempolicy;

/*
 * This is not completely implemented yet. The idea is to
 * create an in-memory tree (like the actual /proc filesystem
 * tree) of these proc_dir_entries, so that we can dynamically
 * add new files to /proc.
 *
 * parent/subdir are used for the directory structure (every /proc file has a
 * parent, but "subdir" is empty for all non-directory entries).
 * subdir_node is used to build the rb tree "subdir" of the parent.
 */
struct proc_dir_entry {
	/*
	 * number of callers into module in progress;
	 * negative -> it's going away RSN
	 */
	atomic_t in_use;
	refcount_t refcnt;
	struct list_head pde_openers;	/* who did ->open, but not ->release */
	/* protects ->pde_openers and all struct pde_opener instances */
	spinlock_t pde_unload_lock;
	struct completion *pde_unload_completion;
	const struct inode_operations *proc_iops;
	union {
		const struct proc_ops *proc_ops;
		const struct file_operations *proc_dir_ops;
	};
	const struct dentry_operations *proc_dops;
	union {
		const struct seq_operations *seq_ops;
		int (*single_show)(struct seq_file *, void *);
	};
	proc_write_t write;
	void *data;
	unsigned int state_size;
	unsigned int low_ino;
	nlink_t nlink;
	kuid_t uid;
	kgid_t gid;
	loff_t size;
	struct proc_dir_entry *parent;
	struct rb_root subdir;
	struct rb_node subdir_node;
	char *name;
	umode_t mode;
	u8 flags;
	u8 namelen;
	char inline_name[];
} __randomize_layout;

#define SIZEOF_PDE	(				\
	sizeof(struct proc_dir_entry) < 128 ? 128 :	\
	sizeof(struct proc_dir_entry) < 192 ? 192 :	\
	sizeof(struct proc_dir_entry) < 256 ? 256 :	\
	sizeof(struct proc_dir_entry) < 512 ? 512 :	\
	0)
#define SIZEOF_PDE_INLINE_NAME (SIZEOF_PDE - sizeof(struct proc_dir_entry))
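/*
 * The intent is that entries come from proc_dir_entry_cache (declared below),
 * whose object size is rounded up to SIZEOF_PDE, so the slack beyond the
 * struct itself is what inline_name[] can use.  As an illustration only
 * (the real numbers depend on the config): if sizeof(struct proc_dir_entry)
 * were 176 bytes, SIZEOF_PDE would be 192 and SIZEOF_PDE_INLINE_NAME would
 * leave 16 bytes for short names stored inline; longer names need a separate
 * allocation reachable via ->name.
 */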

static inline bool pde_is_permanent(const struct proc_dir_entry *pde)
{
	return pde->flags & PROC_ENTRY_PERMANENT;
}

static inline void pde_make_permanent(struct proc_dir_entry *pde)
{
	pde->flags |= PROC_ENTRY_PERMANENT;
}

static inline bool pde_has_proc_read_iter(const struct proc_dir_entry *pde)
{
	return pde->flags & PROC_ENTRY_proc_read_iter;
}

static inline bool pde_has_proc_compat_ioctl(const struct proc_dir_entry *pde)
{
#ifdef CONFIG_COMPAT
	return pde->flags & PROC_ENTRY_proc_compat_ioctl;
#else
	return false;
#endif
}

static inline bool pde_has_proc_lseek(const struct proc_dir_entry *pde)
{
	return pde->flags & PROC_ENTRY_proc_lseek;
}

extern struct kmem_cache *proc_dir_entry_cache;
void pde_free(struct proc_dir_entry *pde);

union proc_op {
	int (*proc_get_link)(struct dentry *, struct path *);
	int (*proc_show)(struct seq_file *m,
		struct pid_namespace *ns, struct pid *pid,
		struct task_struct *task);
	int lsmid;
};

struct proc_inode {
	struct pid *pid;
	unsigned int fd;
	union proc_op op;
	struct proc_dir_entry *pde;
	struct ctl_table_header *sysctl;
	const struct ctl_table *sysctl_entry;
	struct hlist_node sibling_inodes;
	const struct proc_ns_operations *ns_ops;
	struct inode vfs_inode;
} __randomize_layout;

/*
 * General functions
 */
static inline struct proc_inode *PROC_I(const struct inode *inode)
{
	return container_of(inode, struct proc_inode, vfs_inode);
}

static inline struct proc_dir_entry *PDE(const struct inode *inode)
{
	return PROC_I(inode)->pde;
}

static inline struct pid *proc_pid(const struct inode *inode)
{
	return PROC_I(inode)->pid;
}

static inline struct task_struct *get_proc_task(const struct inode *inode)
{
	return get_pid_task(proc_pid(inode), PIDTYPE_PID);
}
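/*
 * A typical caller pattern (sketch only): pin the task for the duration of
 * an operation and drop the reference afterwards, e.g.
 *
 *	struct task_struct *task = get_proc_task(inode);
 *
 *	if (!task)
 *		return -ESRCH;
 *	...
 *	put_task_struct(task);
 *
 * get_proc_task() returns NULL once the task has gone away, so callers must
 * handle that case.
 */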

void task_dump_owner(struct task_struct *task, umode_t mode,
		     kuid_t *ruid, kgid_t *rgid);

unsigned name_to_int(const struct qstr *qstr);
/*
 * Offset of the first process in the /proc root directory.
 */
#define FIRST_PROCESS_ENTRY 256

/* Worst case buffer size needed for holding an integer:
 * "-2147483648\n" plus a terminating NUL is exactly 13 bytes.
 */
#define PROC_NUMBUF 13

#ifdef CONFIG_PAGE_MAPCOUNT
/**
 * folio_precise_page_mapcount() - Number of mappings of this folio page.
 * @folio: The folio.
 * @page: The page.
 *
 * The number of present user page table entries that reference this page
 * as tracked via the RMAP: either referenced directly (PTE) or as part of
 * a larger area that covers this page (e.g., PMD).
 *
 * Use this function only for the calculation of existing statistics
 * (USS, PSS, mapcount_max) and for debugging purposes (/proc/kpagecount).
 *
 * Do not add new users.
 *
 * Returns: The number of mappings of this folio page. 0 for
 * folios that are not mapped to user space or are not tracked via the RMAP
 * (e.g., shared zeropage).
 */
static inline int folio_precise_page_mapcount(struct folio *folio,
		struct page *page)
{
	int mapcount = atomic_read(&page->_mapcount) + 1;

	if (page_mapcount_is_type(mapcount))
		mapcount = 0;
	if (folio_test_large(folio))
		mapcount += folio_entire_mapcount(folio);

	return mapcount;
}
#else /* !CONFIG_PAGE_MAPCOUNT */
static inline int folio_precise_page_mapcount(struct folio *folio,
		struct page *page)
{
	BUILD_BUG();
}
#endif /* CONFIG_PAGE_MAPCOUNT */

/**
 * folio_average_page_mapcount() - Average number of mappings per page in this
 *				   folio
 * @folio: The folio.
 *
 * The average number of user page table entries that reference each page in
 * this folio as tracked via the RMAP: either referenced directly (PTE) or
 * as part of a larger area that covers this page (e.g., PMD).
 *
 * The average is calculated by rounding to the nearest integer; however,
 * to avoid duplicated code in current callers, the average is at least
 * 1 if any page of the folio is mapped.
 *
 * Returns: The average number of mappings per page in this folio.
 */
static inline int folio_average_page_mapcount(struct folio *folio)
{
	int mapcount, entire_mapcount, avg;

	if (!folio_test_large(folio))
		return atomic_read(&folio->_mapcount) + 1;

	mapcount = folio_large_mapcount(folio);
	if (unlikely(mapcount <= 0))
		return 0;
	entire_mapcount = folio_entire_mapcount(folio);
	if (mapcount <= entire_mapcount)
		return entire_mapcount;
	mapcount -= entire_mapcount;

	/* Round to closest integer ... */
	avg = ((unsigned int)mapcount + folio_large_nr_pages(folio) / 2) >> folio_large_order(folio);
	/* ... but return at least 1. */
	return max_t(int, avg + entire_mapcount, 1);
}
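/*
 * Worked example for folio_average_page_mapcount() above, with illustrative
 * numbers only: an order-2 folio (4 pages) with folio_large_mapcount() == 6
 * and no entire (PMD-style) mappings yields (6 + 4/2) >> 2 = 2, i.e.
 * 6/4 = 1.5 rounded to the nearest integer.
 */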
/*
 * array.c
 */
extern const struct file_operations proc_tid_children_operations;

extern void proc_task_name(struct seq_file *m, struct task_struct *p,
			   bool escape);
extern int proc_tid_stat(struct seq_file *, struct pid_namespace *,
			 struct pid *, struct task_struct *);
extern int proc_tgid_stat(struct seq_file *, struct pid_namespace *,
			  struct pid *, struct task_struct *);
extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
			   struct pid *, struct task_struct *);
extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
			  struct pid *, struct task_struct *);

/*
 * base.c
 */
extern const struct dentry_operations pid_dentry_operations;
extern int pid_getattr(struct mnt_idmap *, const struct path *,
		       struct kstat *, u32, unsigned int);
extern int proc_setattr(struct mnt_idmap *, struct dentry *,
			struct iattr *);
extern void proc_pid_evict_inode(struct proc_inode *);
extern struct inode *proc_pid_make_inode(struct super_block *, struct task_struct *, umode_t);
extern void pid_update_inode(struct task_struct *, struct inode *);
extern int pid_delete_dentry(const struct dentry *);
extern int proc_pid_readdir(struct file *, struct dir_context *);
struct dentry *proc_pid_lookup(struct dentry *, unsigned int);
extern loff_t mem_lseek(struct file *, loff_t, int);

/* Lookups */
typedef struct dentry *instantiate_t(struct dentry *,
				     struct task_struct *, const void *);
bool proc_fill_cache(struct file *, struct dir_context *, const char *, unsigned int,
			   instantiate_t, struct task_struct *, const void *);

/*
 * generic.c
 */
struct proc_dir_entry *proc_create_reg(const char *name, umode_t mode,
		struct proc_dir_entry **parent, void *data);
struct proc_dir_entry *proc_register(struct proc_dir_entry *dir,
		struct proc_dir_entry *dp);
extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int);
struct dentry *proc_lookup_de(struct inode *, struct dentry *, struct proc_dir_entry *);
extern int proc_readdir(struct file *, struct dir_context *);
int proc_readdir_de(struct file *, struct dir_context *, struct proc_dir_entry *);

static inline void pde_get(struct proc_dir_entry *pde)
{
	refcount_inc(&pde->refcnt);
}
extern void pde_put(struct proc_dir_entry *);
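/*
 * Informal usage note (sketch, not a rule stated by this header): each
 * pde_get() is expected to be balanced by a pde_put(); dropping the last
 * reference is what ultimately releases the entry via pde_free().
 */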

static inline bool is_empty_pde(const struct proc_dir_entry *pde)
{
	return S_ISDIR(pde->mode) && !pde->proc_iops;
}
extern ssize_t proc_simple_write(struct file *, const char __user *, size_t, loff_t *);

/*
 * inode.c
 */
struct pde_opener {
	struct list_head lh;
	struct file *file;
	bool closing;
	struct completion *c;
} __randomize_layout;
extern const struct inode_operations proc_link_inode_operations;
extern const struct inode_operations proc_pid_link_inode_operations;
extern const struct super_operations proc_sops;

void proc_init_kmemcache(void);
void proc_invalidate_siblings_dcache(struct hlist_head *inodes, spinlock_t *lock);
void set_proc_pid_nlink(void);
extern struct inode *proc_get_inode(struct super_block *, struct proc_dir_entry *);
extern void proc_entry_rundown(struct proc_dir_entry *);

/*
 * proc_namespaces.c
 */
extern const struct inode_operations proc_ns_dir_inode_operations;
extern const struct file_operations proc_ns_dir_operations;

/*
 * proc_net.c
 */
extern const struct file_operations proc_net_operations;
extern const struct inode_operations proc_net_inode_operations;

#ifdef CONFIG_NET
extern int proc_net_init(void);
#else
static inline int proc_net_init(void) { return 0; }
#endif

/*
 * proc_self.c
 */
extern int proc_setup_self(struct super_block *);

/*
 * proc_thread_self.c
 */
extern int proc_setup_thread_self(struct super_block *);
extern void proc_thread_self_init(void);

/*
 * proc_sysctl.c
 */
#ifdef CONFIG_PROC_SYSCTL
extern int proc_sys_init(void);
extern void proc_sys_evict_inode(struct inode *inode,
				 struct ctl_table_header *head);
#else
static inline int proc_sys_init(void) { return 0; }
static inline void proc_sys_evict_inode(struct inode *inode,
					struct ctl_table_header *head) { }
#endif

/*
 * proc_tty.c
 */
#ifdef CONFIG_TTY
extern void proc_tty_init(void);
#else
static inline void proc_tty_init(void) {}
#endif

/*
 * root.c
 */
extern struct proc_dir_entry proc_root;

extern void proc_self_init(void);

/*
 * task_[no]mmu.c
 */
struct mem_size_stats;
struct proc_maps_private {
	struct inode *inode;
	struct task_struct *task;
	struct mm_struct *mm;
	struct vma_iterator iter;
#ifdef CONFIG_NUMA
	struct mempolicy *task_mempolicy;
#endif
} __randomize_layout;

struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode);

extern const struct file_operations proc_pid_maps_operations;
extern const struct file_operations proc_pid_numa_maps_operations;
extern const struct file_operations proc_pid_smaps_operations;
extern const struct file_operations proc_pid_smaps_rollup_operations;
extern const struct file_operations proc_clear_refs_operations;
extern const struct file_operations proc_pagemap_operations;

extern unsigned long task_vsize(struct mm_struct *);
extern unsigned long task_statm(struct mm_struct *,
				unsigned long *, unsigned long *,
				unsigned long *, unsigned long *);
extern void task_mem(struct seq_file *, struct mm_struct *);

extern const struct dentry_operations proc_net_dentry_ops;
static inline void pde_force_lookup(struct proc_dir_entry *pde)
{
	/* /proc/net/ entries can be changed under us by setns(CLONE_NEWNET) */
	pde->proc_dops = &proc_net_dentry_ops;
}

/*
 * Add a new procfs dentry that can't serve as a mountpoint. That should
 * encompass anything that is ephemeral and can just disappear while the
 * process is still around.
 */
static inline struct dentry *proc_splice_unmountable(struct inode *inode,
		struct dentry *dentry, const struct dentry_operations *d_ops)
{
	d_set_d_op(dentry, d_ops);
	dont_mount(dentry);
	return d_splice_alias(inode, dentry);
}
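/*
 * A minimal usage sketch (hypothetical names, not declarations from this
 * header): an instantiate/lookup helper whose dentry must never become a
 * mountpoint would typically end with something like
 *
 *	return proc_splice_unmountable(inode, dentry, &example_dentry_ops);
 *
 * where example_dentry_ops stands in for whatever dentry_operations the
 * caller actually uses.
 */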