xref: /linux/mm/secretmem.c (revision 722ecdbce68a87de2d9296f91308f44ea900a039)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corporation, 2021
 *
 * Author: Mike Rapoport <rppt@linux.ibm.com>
 */

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/swap.h>
#include <linux/mount.h>
#include <linux/memfd.h>
#include <linux/bitops.h>
#include <linux/printk.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/pseudo_fs.h>
#include <linux/secretmem.h>
#include <linux/set_memory.h>
#include <linux/sched/signal.h>

#include <uapi/linux/magic.h>

#include <asm/tlbflush.h>

#include "internal.h"

#undef pr_fmt
#define pr_fmt(fmt) "secretmem: " fmt

/*
 * Define mode and flag masks to allow validation of the system call
 * parameters.
 */
#define SECRETMEM_MODE_MASK	(0x0)
#define SECRETMEM_FLAGS_MASK	SECRETMEM_MODE_MASK

static bool secretmem_enable __ro_after_init;
module_param_named(enable, secretmem_enable, bool, 0400);
MODULE_PARM_DESC(secretmem_enable,
		 "Enable secretmem and memfd_secret(2) system call");
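
/*
 * A minimal sketch of how this switch is expected to be flipped (assuming
 * secretmem is built in, so the module_param above surfaces as a boot-time
 * option rather than a loadable-module parameter): pass
 *
 *	secretmem.enable=1
 *
 * on the kernel command line. Without it, memfd_secret(2) below fails with
 * ENOSYS.
 */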

static atomic_t secretmem_users;

bool secretmem_active(void)
{
	return !!atomic_read(&secretmem_users);
}
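
/*
 * Sketch of the intended consumer of secretmem_active() (hedged: the real
 * call site lives outside this file, in the hibernation code, and may not
 * match this snippet verbatim). Hibernation must be refused while secretmem
 * users exist, otherwise the image written to disk would contain the very
 * pages this file works to keep out of reach:
 *
 *	if (secretmem_active())
 *		return false;	// hibernation not available
 */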

static vm_fault_t secretmem_fault(struct vm_fault *vmf)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	pgoff_t offset = vmf->pgoff;
	gfp_t gfp = vmf->gfp_mask;
	unsigned long addr;
	struct page *page;
	int err;

	if (((loff_t)vmf->pgoff << PAGE_SHIFT) >= i_size_read(inode))
		return vmf_error(-EINVAL);

retry:
	page = find_lock_page(mapping, offset);
	if (!page) {
		page = alloc_page(gfp | __GFP_ZERO);
		if (!page)
			return VM_FAULT_OOM;

		err = set_direct_map_invalid_noflush(page);
		if (err) {
			put_page(page);
			return vmf_error(err);
		}

		__SetPageUptodate(page);
		err = add_to_page_cache_lru(page, mapping, offset, gfp);
		if (unlikely(err)) {
			put_page(page);
			/*
			 * If a split of a large page was required, it
			 * already happened when we marked the page invalid
			 * in the direct map, which guarantees that this
			 * call won't fail.
			 */
			set_direct_map_default_noflush(page);
			if (err == -EEXIST)
				goto retry;

			return vmf_error(err);
		}

		addr = (unsigned long)page_address(page);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}

	vmf->page = page;
	return VM_FAULT_LOCKED;
}

static const struct vm_operations_struct secretmem_vm_ops = {
	.fault = secretmem_fault,
};

static int secretmem_release(struct inode *inode, struct file *file)
{
	atomic_dec(&secretmem_users);
	return 0;
}

static int secretmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long len = vma->vm_end - vma->vm_start;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	if (mlock_future_check(vma->vm_mm, vma->vm_flags | VM_LOCKED, len))
		return -EAGAIN;

	vma->vm_flags |= VM_LOCKED | VM_DONTDUMP;
	vma->vm_ops = &secretmem_vm_ops;

	return 0;
}
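
/*
 * Userspace-visible consequences of the checks in secretmem_mmap() above,
 * as a hedged illustration (fd obtained from memfd_secret(2), error handling
 * omitted):
 *
 *	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
 *		// fails with EINVAL: the mapping must be MAP_SHARED
 *	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *		// succeeds only if len fits within RLIMIT_MEMLOCK (or the
 *		// caller has CAP_IPC_LOCK), otherwise fails with EAGAIN
 */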

bool vma_is_secretmem(struct vm_area_struct *vma)
{
	return vma->vm_ops == &secretmem_vm_ops;
}
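
/*
 * vma_is_secretmem() lets the rest of mm recognize these VMAs; the main
 * expected consumer is the get_user_pages()/pinning path, which must not
 * hand out pages that have been dropped from the direct map. A sketch of
 * such a caller-side check (illustrative, not a verbatim copy of mm/gup.c):
 *
 *	if (vma_is_secretmem(vma))
 *		return -EFAULT;
 */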

static const struct file_operations secretmem_fops = {
	.release	= secretmem_release,
	.mmap		= secretmem_mmap,
};

static bool secretmem_isolate_page(struct page *page, isolate_mode_t mode)
{
	return false;
}

static int secretmem_migratepage(struct address_space *mapping,
				 struct page *newpage, struct page *page,
				 enum migrate_mode mode)
{
	return -EBUSY;
}

static void secretmem_free_folio(struct folio *folio)
{
	set_direct_map_default_noflush(&folio->page);
	folio_zero_segment(folio, 0, folio_size(folio));
}

const struct address_space_operations secretmem_aops = {
	.dirty_folio	= noop_dirty_folio,
	.free_folio	= secretmem_free_folio,
	.migratepage	= secretmem_migratepage,
	.isolate_page	= secretmem_isolate_page,
};

static int secretmem_setattr(struct user_namespace *mnt_userns,
			     struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = d_inode(dentry);
	unsigned int ia_valid = iattr->ia_valid;

	if ((ia_valid & ATTR_SIZE) && inode->i_size)
		return -EINVAL;

	return simple_setattr(mnt_userns, dentry, iattr);
}

static const struct inode_operations secretmem_iops = {
	.setattr = secretmem_setattr,
};
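
/*
 * The ATTR_SIZE check in secretmem_setattr() means the size of a secretmem
 * file can be set only while it is still zero. A hedged userspace
 * illustration (wrapper name and error handling are assumptions):
 *
 *	int fd = memfd_secret(0);
 *	ftruncate(fd, 2 * 4096);	// ok: i_size was 0
 *	ftruncate(fd, 4 * 4096);	// fails with EINVAL: size already set
 */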

static struct vfsmount *secretmem_mnt;

static struct file *secretmem_file_create(unsigned long flags)
{
	struct file *file = ERR_PTR(-ENOMEM);
	struct inode *inode;

	inode = alloc_anon_inode(secretmem_mnt->mnt_sb);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	file = alloc_file_pseudo(inode, secretmem_mnt, "secretmem",
				 O_RDWR, &secretmem_fops);
	if (IS_ERR(file))
		goto err_free_inode;

	mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
	mapping_set_unevictable(inode->i_mapping);

	inode->i_op = &secretmem_iops;
	inode->i_mapping->a_ops = &secretmem_aops;

	/* pretend we are a normal file with zero size */
	inode->i_mode |= S_IFREG;
	inode->i_size = 0;

	return file;

err_free_inode:
	iput(inode);
	return file;
}

SYSCALL_DEFINE1(memfd_secret, unsigned int, flags)
{
	struct file *file;
	int fd, err;

	/* make sure local flags do not conflict with global fcntl.h */
	BUILD_BUG_ON(SECRETMEM_FLAGS_MASK & O_CLOEXEC);

	if (!secretmem_enable)
		return -ENOSYS;

	if (flags & ~(SECRETMEM_FLAGS_MASK | O_CLOEXEC))
		return -EINVAL;
	if (atomic_read(&secretmem_users) < 0)
		return -ENFILE;

	fd = get_unused_fd_flags(flags & O_CLOEXEC);
	if (fd < 0)
		return fd;

	file = secretmem_file_create(flags);
	if (IS_ERR(file)) {
		err = PTR_ERR(file);
		goto err_put_fd;
	}

	file->f_flags |= O_LARGEFILE;

	atomic_inc(&secretmem_users);
	fd_install(fd, file);
	return fd;

err_put_fd:
	put_unused_fd(fd);
	return err;
}
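
/*
 * End-to-end usage sketch from userspace, hedged: libc may not ship a
 * memfd_secret() wrapper, so the raw syscall is used, and the
 * SYS_memfd_secret constant is only present with new enough headers.
 * Error handling is omitted for brevity.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/mman.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int fd = syscall(SYS_memfd_secret, O_CLOEXEC);	// ENOSYS unless secretmem.enable=1
 *	ftruncate(fd, 4096);				// size can be set exactly once
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	strcpy(p, "not in the direct map");		// first touch faults the page in
 *	munmap(p, 4096);
 *	close(fd);	// on final release the pages go back to the direct map and are zeroed
 */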

static int secretmem_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, SECRETMEM_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type secretmem_fs = {
	.name		= "secretmem",
	.init_fs_context = secretmem_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static int secretmem_init(void)
{
	int ret = 0;

	if (!secretmem_enable)
		return ret;

	secretmem_mnt = kern_mount(&secretmem_fs);
	/* bail out before dereferencing secretmem_mnt if the mount failed */
	if (IS_ERR(secretmem_mnt))
		return PTR_ERR(secretmem_mnt);

	/* prevent secretmem mappings from ever getting PROT_EXEC */
	secretmem_mnt->mnt_flags |= MNT_NOEXEC;

	return ret;
}
fs_initcall(secretmem_init);