// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corporation, 2021
 *
 * Author: Mike Rapoport <rppt@linux.ibm.com>
 */

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/swap.h>
#include <linux/mount.h>
#include <linux/memfd.h>
#include <linux/bitops.h>
#include <linux/printk.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/pseudo_fs.h>
#include <linux/secretmem.h>
#include <linux/set_memory.h>
#include <linux/sched/signal.h>

#include <uapi/linux/magic.h>

#include <asm/tlbflush.h>

#include "internal.h"

#undef pr_fmt
#define pr_fmt(fmt) "secretmem: " fmt

/*
 * Define mode and flag masks to allow validation of the system call
 * parameters.
 */
#define SECRETMEM_MODE_MASK	(0x0)
#define SECRETMEM_FLAGS_MASK	SECRETMEM_MODE_MASK

static bool secretmem_enable __ro_after_init = 1;
module_param_named(enable, secretmem_enable, bool, 0400);
MODULE_PARM_DESC(secretmem_enable,
		 "Enable secretmem and memfd_secret(2) system call");

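/*
 * secretmem_users counts open secretmem file descriptors. While it is
 * non-zero, secretmem_active() reports true and the power management code
 * refuses hibernation, since secret pages must never be written out to
 * the (unencrypted) hibernation image.
 */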
static atomic_t secretmem_users;

bool secretmem_active(void)
{
	return !!atomic_read(&secretmem_users);
}

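/*
 * Fault handler: on the first touch of a page, allocate a zeroed folio,
 * remove it from the kernel direct map so the kernel itself can no longer
 * address the contents, and insert it into the page cache. The inode size
 * must have been set with ftruncate() beforehand; faults past EOF fail
 * with SIGBUS (via vmf_error(-EINVAL)).
 */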
static vm_fault_t secretmem_fault(struct vm_fault *vmf)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	pgoff_t offset = vmf->pgoff;
	gfp_t gfp = vmf->gfp_mask;
	unsigned long addr;
	struct folio *folio;
	vm_fault_t ret;
	int err;

	if (((loff_t)vmf->pgoff << PAGE_SHIFT) >= i_size_read(inode))
		return vmf_error(-EINVAL);

	filemap_invalidate_lock_shared(mapping);

retry:
	folio = filemap_lock_folio(mapping, offset);
	if (IS_ERR(folio)) {
		folio = folio_alloc(gfp | __GFP_ZERO, 0);
		if (!folio) {
			ret = VM_FAULT_OOM;
			goto out;
		}

		err = set_direct_map_invalid_noflush(folio_page(folio, 0));
		if (err) {
			folio_put(folio);
			ret = vmf_error(err);
			goto out;
		}

		__folio_mark_uptodate(folio);
		err = filemap_add_folio(mapping, folio, offset, gfp);
		if (unlikely(err)) {
			/*
			 * If a split of a large page was required, it
			 * already happened when we marked the page
			 * invalid, which guarantees that this call
			 * won't fail.
			 */
			set_direct_map_default_noflush(folio_page(folio, 0));
			folio_put(folio);
			if (err == -EEXIST)
				goto retry;

			ret = vmf_error(err);
			goto out;
		}

		addr = (unsigned long)folio_address(folio);
		/* flush stale TLB entries for the now unmapped direct-map range */
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}

	vmf->page = folio_file_page(folio, vmf->pgoff);
	ret = VM_FAULT_LOCKED;

out:
	filemap_invalidate_unlock_shared(mapping);
	return ret;
}

static const struct vm_operations_struct secretmem_vm_ops = {
	.fault = secretmem_fault,
};

static int secretmem_release(struct inode *inode, struct file *file)
{
	atomic_dec(&secretmem_users);
	return 0;
}

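/*
 * Secretmem mappings must be shared, and they are implicitly mlock()ed:
 * VM_LOCKED keeps the pages off the swap path and VM_DONTDUMP keeps them
 * out of core dumps, so the mapping must fit within the caller's
 * RLIMIT_MEMLOCK.
 */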
static int secretmem_mmap_prepare(struct vm_area_desc *desc)
{
	const unsigned long len = vma_desc_size(desc);

	if ((desc->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	if (!mlock_future_ok(desc->mm, desc->vm_flags | VM_LOCKED, len))
		return -EAGAIN;

	desc->vm_flags |= VM_LOCKED | VM_DONTDUMP;
	desc->vm_ops = &secretmem_vm_ops;

	return 0;
}

bool vma_is_secretmem(struct vm_area_struct *vma)
{
	return vma->vm_ops == &secretmem_vm_ops;
}

static const struct file_operations secretmem_fops = {
	.release	= secretmem_release,
	.mmap_prepare	= secretmem_mmap_prepare,
};

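/*
 * Migration is refused outright: it would require the kernel to copy the
 * secret contents through its own mapping of the destination page,
 * defeating the point of removing the pages from the direct map.
 */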
static int secretmem_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	return -EBUSY;
}

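/*
 * Called when a secretmem folio leaves the page cache: plug the page back
 * into the direct map and zero it, so stale secrets never reach the page
 * allocator.
 */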
static void secretmem_free_folio(struct folio *folio)
{
	set_direct_map_default_noflush(folio_page(folio, 0));
	folio_zero_segment(folio, 0, folio_size(folio));
}

const struct address_space_operations secretmem_aops = {
	.dirty_folio	= noop_dirty_folio,
	.free_folio	= secretmem_free_folio,
	.migrate_folio	= secretmem_migrate_folio,
};

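/*
 * The file size may only be set while it is still zero; an already sized
 * secretmem area can be neither grown nor shrunk.
 */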
static int secretmem_setattr(struct mnt_idmap *idmap,
			     struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = d_inode(dentry);
	struct address_space *mapping = inode->i_mapping;
	unsigned int ia_valid = iattr->ia_valid;
	int ret;

	filemap_invalidate_lock(mapping);

	if ((ia_valid & ATTR_SIZE) && inode->i_size)
		ret = -EINVAL;
	else
		ret = simple_setattr(idmap, dentry, iattr);

	filemap_invalidate_unlock(mapping);

	return ret;
}

static const struct inode_operations secretmem_iops = {
	.setattr = secretmem_setattr,
};

static struct vfsmount *secretmem_mnt;

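/*
 * Note that the mapping gfp mask is GFP_HIGHUSER rather than
 * GFP_HIGHUSER_MOVABLE: secretmem folios refuse migration, so they must
 * not come from a movable zone. The mapping is also marked unevictable so
 * reclaim never attempts to write the pages out.
 */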
static struct file *secretmem_file_create(unsigned long flags)
{
	struct file *file;
	struct inode *inode;
	const char *anon_name = "[secretmem]";

	inode = anon_inode_make_secure_inode(secretmem_mnt->mnt_sb, anon_name, NULL);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	file = alloc_file_pseudo(inode, secretmem_mnt, "secretmem",
				 O_RDWR | O_LARGEFILE, &secretmem_fops);
	if (IS_ERR(file))
		goto err_free_inode;

	mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
	mapping_set_unevictable(inode->i_mapping);

	inode->i_op = &secretmem_iops;
	inode->i_mapping->a_ops = &secretmem_aops;

	/* pretend we are a normal file with zero size */
	inode->i_mode |= S_IFREG;
	inode->i_size = 0;

	atomic_inc(&secretmem_users);

	return file;

err_free_inode:
	iput(inode);
	return file;
}

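/*
 * Minimal userspace usage sketch (illustration only, not part of this
 * file; error handling omitted, assumes <sys/syscall.h> provides
 * __NR_memfd_secret):
 *
 *	int fd = syscall(__NR_memfd_secret, 0);
 *	ftruncate(fd, len);
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 * The mapping must be MAP_SHARED, and the size must be set with
 * ftruncate() before the first access, or the fault handler above raises
 * SIGBUS.
 */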
SYSCALL_DEFINE1(memfd_secret, unsigned int, flags)
{
	struct file *file;
	int fd;

	/* make sure local flags do not conflict with global fcntl.h */
	BUILD_BUG_ON(SECRETMEM_FLAGS_MASK & O_CLOEXEC);

	if (!secretmem_enable || !can_set_direct_map())
		return -ENOSYS;

	if (flags & ~(SECRETMEM_FLAGS_MASK | O_CLOEXEC))
		return -EINVAL;
	if (atomic_read(&secretmem_users) < 0)
		return -ENFILE;

	fd = get_unused_fd_flags(flags & O_CLOEXEC);
	if (fd < 0)
		return fd;

	file = secretmem_file_create(flags);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		return PTR_ERR(file);
	}

	fd_install(fd, file);
	return fd;
}

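/*
 * The backing pseudo-filesystem is marked SB_I_NOEXEC and SB_I_NODEV so
 * secretmem mappings can never become executable and its files are never
 * honoured as device nodes.
 */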
static int secretmem_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx;

	ctx = init_pseudo(fc, SECRETMEM_MAGIC);
	if (!ctx)
		return -ENOMEM;

	fc->s_iflags |= SB_I_NOEXEC;
	fc->s_iflags |= SB_I_NODEV;
	return 0;
}

static struct file_system_type secretmem_fs = {
	.name		= "secretmem",
	.init_fs_context = secretmem_init_fs_context,
	.kill_sb	= kill_anon_super,
};

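/*
 * Returning 0 even when secretmem is disabled (or the architecture cannot
 * modify the direct map) lets boot proceed normally; in that case
 * memfd_secret() simply fails with -ENOSYS.
 */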
static int __init secretmem_init(void)
{
	if (!secretmem_enable || !can_set_direct_map())
		return 0;

	secretmem_mnt = kern_mount(&secretmem_fs);
	if (IS_ERR(secretmem_mnt))
		return PTR_ERR(secretmem_mnt);

	return 0;
}
fs_initcall(secretmem_init);