xref: /linux/mm/memfd.c (revision beace86e61e465dba204a268ab3f3377153a4973)
/*
 * memfd_create system call and file sealing support
 *
 * Code was originally included in shmem.c, and broken out to facilitate
 * use by hugetlbfs as well as tmpfs.
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/khugepaged.h>
#include <linux/syscalls.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <linux/memfd.h>
#include <linux/pid_namespace.h>
#include <uapi/linux/memfd.h>
#include "swap.h"

/*
 * We need a tag: a new tag would expand every xa_node by 8 bytes,
 * so reuse a tag which we firmly believe is never set or cleared on tmpfs
 * or hugetlbfs because they are memory-only filesystems.
 */
#define MEMFD_TAG_PINNED        PAGECACHE_TAG_TOWRITE
#define LAST_SCAN               4       /* about 150ms max */

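/*
 * A folio with only its expected references (page cache, private data,
 * page table mappings) is not pinned; any additional reference is taken
 * to come from a pin such as get_user_pages().
 */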
static bool memfd_folio_has_extra_refs(struct folio *folio)
{
	return folio_ref_count(folio) != folio_expected_ref_count(folio);
}

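/*
 * Walk the whole mapping and tag every folio that still carries extra
 * references, so that memfd_wait_for_pins() only has to rescan the
 * marked entries. The lock is dropped every XA_CHECK_SCHED entries to
 * keep irq-off latency bounded.
 */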
static void memfd_tag_pins(struct xa_state *xas)
{
	struct folio *folio;
	int latency = 0;

	lru_add_drain();

	xas_lock_irq(xas);
	xas_for_each(xas, folio, ULONG_MAX) {
		if (!xa_is_value(folio) && memfd_folio_has_extra_refs(folio))
			xas_set_mark(xas, MEMFD_TAG_PINNED);

		if (++latency < XA_CHECK_SCHED)
			continue;
		latency = 0;

		xas_pause(xas);
		xas_unlock_irq(xas);
		cond_resched();
		xas_lock_irq(xas);
	}
	xas_unlock_irq(xas);
}

/*
 * This is a helper function used by memfd_pin_folios() in GUP (gup.c).
 * It is mainly called to allocate a folio in a memfd when the caller
 * cannot find a folio in the page cache at a given index in the mapping.
 */
struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx)
{
#ifdef CONFIG_HUGETLB_PAGE
	struct folio *folio;
	gfp_t gfp_mask;

	if (is_file_hugepages(memfd)) {
		/*
		 * The folio would most likely be accessed by a DMA driver,
		 * therefore, we have zone memory constraints on where we can
		 * alloc from. Also, the folio will be pinned for an indefinite
		 * amount of time, so it is not expected to be migrated away.
		 */
		struct inode *inode = file_inode(memfd);
		struct hstate *h = hstate_file(memfd);
		int err = -ENOMEM;
		long nr_resv;

		gfp_mask = htlb_alloc_mask(h);
		gfp_mask &= ~(__GFP_HIGHMEM | __GFP_MOVABLE);
		idx >>= huge_page_order(h);

		nr_resv = hugetlb_reserve_pages(inode, idx, idx + 1, NULL, 0);
		if (nr_resv < 0)
			return ERR_PTR(nr_resv);

		folio = alloc_hugetlb_folio_reserve(h,
						    numa_node_id(),
						    NULL,
						    gfp_mask);
		if (folio) {
			err = hugetlb_add_to_page_cache(folio,
							memfd->f_mapping,
							idx);
			if (err) {
				folio_put(folio);
				goto err_unresv;
			}

			hugetlb_set_folio_subpool(folio, subpool_inode(inode));
			folio_unlock(folio);
			return folio;
		}
err_unresv:
		if (nr_resv > 0)
			hugetlb_unreserve_pages(inode, idx, idx + 1, 0);
		return ERR_PTR(err);
	}
#endif
	return shmem_read_folio(memfd->f_mapping, idx);
}

/*
 * Setting SEAL_WRITE requires us to verify there's no pending writer. However,
 * via get_user_pages(), drivers might have some pending I/O without any active
 * user-space mappings (e.g., direct-IO, AIO). Therefore, we look at all folios
 * and see whether they have an elevated ref-count. If so, we tag them and wait
 * for the references to be dropped.
 * The caller must guarantee that no new user will acquire writable references
 * to those folios to avoid races.
 */
static int memfd_wait_for_pins(struct address_space *mapping)
{
	XA_STATE(xas, &mapping->i_pages, 0);
	struct folio *folio;
	int error, scan;

	memfd_tag_pins(&xas);

	error = 0;
	for (scan = 0; scan <= LAST_SCAN; scan++) {
		int latency = 0;

		if (!xas_marked(&xas, MEMFD_TAG_PINNED))
			break;

		if (!scan)
			lru_add_drain_all();
		else if (schedule_timeout_killable((HZ << scan) / 200))
			scan = LAST_SCAN;

		xas_set(&xas, 0);
		xas_lock_irq(&xas);
		xas_for_each_marked(&xas, folio, ULONG_MAX, MEMFD_TAG_PINNED) {
			bool clear = true;

			if (!xa_is_value(folio) &&
			    memfd_folio_has_extra_refs(folio)) {
				/*
				 * On the last scan, we clean up all those tags
				 * we inserted; but make a note that we still
				 * found folios pinned.
				 */
				if (scan == LAST_SCAN)
					error = -EBUSY;
				else
					clear = false;
			}
			if (clear)
				xas_clear_mark(&xas, MEMFD_TAG_PINNED);

			if (++latency < XA_CHECK_SCHED)
				continue;
			latency = 0;

			xas_pause(&xas);
			xas_unlock_irq(&xas);
			cond_resched();
			xas_lock_irq(&xas);
		}
		xas_unlock_irq(&xas);
	}

	return error;
}
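
/*
 * The scan loop above backs off exponentially: after the initial scan
 * it sleeps (HZ << scan) / 200 jiffies, i.e. roughly 10ms, 20ms, 40ms
 * and 80ms, which together account for the "about 150ms max" noted at
 * the LAST_SCAN definition.
 */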

static unsigned int *memfd_file_seals_ptr(struct file *file)
{
	if (shmem_file(file))
		return &SHMEM_I(file_inode(file))->seals;

#ifdef CONFIG_HUGETLBFS
	if (is_file_hugepages(file))
		return &HUGETLBFS_I(file_inode(file))->seals;
#endif

	return NULL;
}

#define F_ALL_SEALS (F_SEAL_SEAL | \
		     F_SEAL_EXEC | \
		     F_SEAL_SHRINK | \
		     F_SEAL_GROW | \
		     F_SEAL_WRITE | \
		     F_SEAL_FUTURE_WRITE)

static int memfd_add_seals(struct file *file, unsigned int seals)
{
	struct inode *inode = file_inode(file);
	unsigned int *file_seals;
	int error;

	/*
	 * SEALING
	 * Sealing allows multiple parties to share a tmpfs or hugetlbfs file
	 * but restrict access to a specific subset of file operations. Seals
	 * can only be added, but never removed. This way, mutually untrusted
	 * parties can share common memory regions with a well-defined policy.
	 * A malicious peer can thus never perform unwanted operations on a
	 * shared object.
	 *
	 * Seals are only supported on special tmpfs or hugetlbfs files and
	 * always affect the whole underlying inode. Once a seal is set, it
	 * may prevent some kinds of access to the file. Currently, the
	 * following seals are defined:
	 *   SEAL_SEAL: Prevent further seals from being set on this file
	 *   SEAL_SHRINK: Prevent the file from shrinking
	 *   SEAL_GROW: Prevent the file from growing
	 *   SEAL_WRITE: Prevent write access to the file
	 *   SEAL_EXEC: Prevent modification of the exec bits in the file mode
	 *
	 * As we don't require any trust relationship between two parties, we
	 * must prevent seals from being removed. Therefore, sealing a file
	 * only adds a given set of seals to the file, it never touches
	 * existing seals. Furthermore, the "setting seals"-operation can be
	 * sealed itself, which basically prevents any further seal from being
	 * added.
	 *
	 * Semantics of sealing are only defined on volatile files. Only
	 * anonymous tmpfs and hugetlbfs files support sealing. More
	 * importantly, seals are never written to disk. Therefore, there's
	 * no plan to support it on other file types.
	 */

	if (!(file->f_mode & FMODE_WRITE))
		return -EPERM;
	if (seals & ~(unsigned int)F_ALL_SEALS)
		return -EINVAL;

	inode_lock(inode);

	file_seals = memfd_file_seals_ptr(file);
	if (!file_seals) {
		error = -EINVAL;
		goto unlock;
	}

	if (*file_seals & F_SEAL_SEAL) {
		error = -EPERM;
		goto unlock;
	}

	if ((seals & F_SEAL_WRITE) && !(*file_seals & F_SEAL_WRITE)) {
		error = mapping_deny_writable(file->f_mapping);
		if (error)
			goto unlock;

		error = memfd_wait_for_pins(file->f_mapping);
		if (error) {
			mapping_allow_writable(file->f_mapping);
			goto unlock;
		}
	}

	/*
	 * Setting F_SEAL_EXEC on a file whose mode has exec bits set
	 * implies the write seals as well, enforcing W^X from the start.
	 */
	if (seals & F_SEAL_EXEC && inode->i_mode & 0111)
		seals |= F_SEAL_SHRINK|F_SEAL_GROW|F_SEAL_WRITE|F_SEAL_FUTURE_WRITE;

	*file_seals |= seals;
	error = 0;

unlock:
	inode_unlock(inode);
	return error;
}

static int memfd_get_seals(struct file *file)
{
	unsigned int *seals = memfd_file_seals_ptr(file);

	return seals ? *seals : -EINVAL;
}

long memfd_fcntl(struct file *file, unsigned int cmd, unsigned int arg)
{
	long error;

	switch (cmd) {
	case F_ADD_SEALS:
		error = memfd_add_seals(file, arg);
		break;
	case F_GET_SEALS:
		error = memfd_get_seals(file);
		break;
	default:
		error = -EINVAL;
		break;
	}

	return error;
}
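
/*
 * Illustrative userspace use of the sealing API above (a sketch, not
 * kernel code): grow the file to its final size, then forbid any
 * further resizing or sealing. Once F_SEAL_SEAL is in place, later
 * F_ADD_SEALS calls fail with EPERM.
 *
 *	int fd = memfd_create("buf", MFD_CLOEXEC | MFD_ALLOW_SEALING);
 *	ftruncate(fd, 1 << 20);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL);
 */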

#define MFD_NAME_PREFIX "memfd:"
#define MFD_NAME_PREFIX_LEN (sizeof(MFD_NAME_PREFIX) - 1)
#define MFD_NAME_MAX_LEN (NAME_MAX - MFD_NAME_PREFIX_LEN)

#define MFD_ALL_FLAGS (MFD_CLOEXEC | MFD_ALLOW_SEALING | MFD_HUGETLB | MFD_NOEXEC_SEAL | MFD_EXEC)

static int check_sysctl_memfd_noexec(unsigned int *flags)
{
#ifdef CONFIG_SYSCTL
	struct pid_namespace *ns = task_active_pid_ns(current);
	int sysctl = pidns_memfd_noexec_scope(ns);

	if (!(*flags & (MFD_EXEC | MFD_NOEXEC_SEAL))) {
		if (sysctl >= MEMFD_NOEXEC_SCOPE_NOEXEC_SEAL)
			*flags |= MFD_NOEXEC_SEAL;
		else
			*flags |= MFD_EXEC;
	}

	if (!(*flags & MFD_NOEXEC_SEAL) && sysctl >= MEMFD_NOEXEC_SCOPE_NOEXEC_ENFORCED) {
		pr_err_ratelimited(
			"%s[%d]: memfd_create() requires MFD_NOEXEC_SEAL with vm.memfd_noexec=%d\n",
			current->comm, task_pid_nr(current), sysctl);
		return -EACCES;
	}
#endif
	return 0;
}
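
/*
 * The vm.memfd_noexec pid-namespace sysctl consulted above has three
 * scopes: 0 (MEMFD_NOEXEC_SCOPE_EXEC) defaults callers that pass
 * neither flag to MFD_EXEC, 1 (..._NOEXEC_SEAL) defaults them to
 * MFD_NOEXEC_SEAL, and 2 (..._NOEXEC_ENFORCED) additionally rejects
 * any caller that does not pass MFD_NOEXEC_SEAL.
 */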

static inline bool is_write_sealed(unsigned int seals)
{
	return seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE);
}

static int check_write_seal(vm_flags_t *vm_flags_ptr)
{
	vm_flags_t vm_flags = *vm_flags_ptr;
	vm_flags_t mask = vm_flags & (VM_SHARED | VM_WRITE);

	/* If a private mapping then writability is irrelevant. */
	if (!(mask & VM_SHARED))
		return 0;

	/*
	 * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
	 * write seals are active.
	 */
	if (mask & VM_WRITE)
		return -EPERM;

	/*
	 * This is a read-only mapping, disallow mprotect() from making a
	 * write-sealed mapping writable in future.
	 */
	*vm_flags_ptr &= ~VM_MAYWRITE;

	return 0;
}
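
/*
 * From userspace, the checks above mean that on a write-sealed memfd
 * (a sketch of the expected behavior):
 *
 *	mmap(NULL, len, PROT_WRITE, MAP_SHARED, fd, 0);    // fails, EPERM
 *	p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0); // succeeds
 *	mprotect(p, len, PROT_READ | PROT_WRITE);  // fails, VM_MAYWRITE clear
 */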

int memfd_check_seals_mmap(struct file *file, vm_flags_t *vm_flags_ptr)
{
	int err = 0;
	unsigned int *seals_ptr = memfd_file_seals_ptr(file);
	unsigned int seals = seals_ptr ? *seals_ptr : 0;

	if (is_write_sealed(seals))
		err = check_write_seal(vm_flags_ptr);

	return err;
}

static int sanitize_flags(unsigned int *flags_ptr)
{
	unsigned int flags = *flags_ptr;

	if (!(flags & MFD_HUGETLB)) {
		if (flags & ~(unsigned int)MFD_ALL_FLAGS)
			return -EINVAL;
	} else {
		/* Allow huge page size encoding in flags. */
		if (flags & ~(unsigned int)(MFD_ALL_FLAGS |
				(MFD_HUGE_MASK << MFD_HUGE_SHIFT)))
			return -EINVAL;
	}

	/* Invalid if both EXEC and NOEXEC_SEAL are set. */
	if ((flags & MFD_EXEC) && (flags & MFD_NOEXEC_SEAL))
		return -EINVAL;

	return check_sysctl_memfd_noexec(flags_ptr);
}

static char *alloc_name(const char __user *uname)
{
	int error;
	char *name;
	long len;

	name = kmalloc(NAME_MAX + 1, GFP_KERNEL);
	if (!name)
		return ERR_PTR(-ENOMEM);

	memcpy(name, MFD_NAME_PREFIX, MFD_NAME_PREFIX_LEN);
	/* returned length does not include terminating zero */
	len = strncpy_from_user(&name[MFD_NAME_PREFIX_LEN], uname, MFD_NAME_MAX_LEN + 1);
	if (len < 0) {
		error = -EFAULT;
		goto err_name;
	} else if (len > MFD_NAME_MAX_LEN) {
		error = -EINVAL;
		goto err_name;
	}

	return name;

err_name:
	kfree(name);
	return ERR_PTR(error);
}

static struct file *alloc_file(const char *name, unsigned int flags)
{
	unsigned int *file_seals;
	struct file *file;

	if (flags & MFD_HUGETLB) {
		file = hugetlb_file_setup(name, 0, VM_NORESERVE,
					HUGETLB_ANONHUGE_INODE,
					(flags >> MFD_HUGE_SHIFT) &
					MFD_HUGE_MASK);
	} else {
		file = shmem_file_setup(name, 0, VM_NORESERVE);
	}
	if (IS_ERR(file))
		return file;
	file->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE;
	file->f_flags |= O_LARGEFILE;

	if (flags & MFD_NOEXEC_SEAL) {
		struct inode *inode = file_inode(file);

		inode->i_mode &= ~0111;
		file_seals = memfd_file_seals_ptr(file);
		if (file_seals) {
			*file_seals &= ~F_SEAL_SEAL;
			*file_seals |= F_SEAL_EXEC;
		}
	} else if (flags & MFD_ALLOW_SEALING) {
		/* MFD_EXEC and MFD_ALLOW_SEALING are set */
		file_seals = memfd_file_seals_ptr(file);
		if (file_seals)
			*file_seals &= ~F_SEAL_SEAL;
	}

	return file;
}
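
/*
 * Note that freshly created shmem and hugetlbfs inodes start out with
 * F_SEAL_SEAL set, so a memfd is unsealable by default; clearing
 * F_SEAL_SEAL above is what MFD_ALLOW_SEALING (and MFD_NOEXEC_SEAL)
 * actually does to enable later F_ADD_SEALS calls.
 */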

SYSCALL_DEFINE2(memfd_create,
		const char __user *, uname,
		unsigned int, flags)
{
	struct file *file;
	int fd, error;
	char *name;

	error = sanitize_flags(&flags);
	if (error < 0)
		return error;

	name = alloc_name(uname);
	if (IS_ERR(name))
		return PTR_ERR(name);

	fd = get_unused_fd_flags((flags & MFD_CLOEXEC) ? O_CLOEXEC : 0);
	if (fd < 0) {
		error = fd;
		goto err_free_name;
	}

	file = alloc_file(name, flags);
	if (IS_ERR(file)) {
		error = PTR_ERR(file);
		goto err_free_fd;
	}

	fd_install(fd, file);
	kfree(name);
	return fd;

err_free_fd:
	put_unused_fd(fd);
err_free_name:
	kfree(name);
	return error;
}
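
/*
 * Illustrative userspace call of the syscall defined above (a sketch,
 * not kernel code):
 *
 *	int fd = memfd_create("example", MFD_CLOEXEC | MFD_NOEXEC_SEAL);
 *
 * With MFD_NOEXEC_SEAL, alloc_file() clears the exec bits on the inode
 * and applies F_SEAL_EXEC while leaving the file sealable.
 */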