xref: /linux/arch/x86/kernel/cpu/sgx/encl.c (revision 26fbb4c8c7c3ee9a4c3b4de555a8587b5a19154e)
1 // SPDX-License-Identifier: GPL-2.0
2 /*  Copyright(c) 2016-20 Intel Corporation. */
3 
4 #include <linux/lockdep.h>
5 #include <linux/mm.h>
6 #include <linux/mman.h>
7 #include <linux/shmem_fs.h>
8 #include <linux/suspend.h>
9 #include <linux/sched/mm.h>
10 #include "arch.h"
11 #include "encl.h"
12 #include "encls.h"
13 #include "sgx.h"
14 
15 /*
16  * ELDU: Load an EPC page as unblocked. For more info, see "OS Management of EPC
17  * Pages" in the SDM.
18  */
19 static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
20 			   struct sgx_epc_page *epc_page,
21 			   struct sgx_epc_page *secs_page)
22 {
23 	unsigned long va_offset = encl_page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK;
24 	struct sgx_encl *encl = encl_page->encl;
25 	struct sgx_pageinfo pginfo;
26 	struct sgx_backing b;
27 	pgoff_t page_index;
28 	int ret;
29 
30 	if (secs_page)
31 		page_index = PFN_DOWN(encl_page->desc - encl_page->encl->base);
32 	else
33 		page_index = PFN_DOWN(encl->size);
34 
35 	ret = sgx_encl_get_backing(encl, page_index, &b);
36 	if (ret)
37 		return ret;
38 
39 	pginfo.addr = encl_page->desc & PAGE_MASK;
40 	pginfo.contents = (unsigned long)kmap_atomic(b.contents);
41 	pginfo.metadata = (unsigned long)kmap_atomic(b.pcmd) +
42 			  b.pcmd_offset;
43 
44 	if (secs_page)
45 		pginfo.secs = (u64)sgx_get_epc_virt_addr(secs_page);
46 	else
47 		pginfo.secs = 0;
48 
49 	ret = __eldu(&pginfo, sgx_get_epc_virt_addr(epc_page),
50 		     sgx_get_epc_virt_addr(encl_page->va_page->epc_page) + va_offset);
51 	if (ret) {
52 		if (encls_failed(ret))
53 			ENCLS_WARN(ret, "ELDU");
54 
55 		ret = -EFAULT;
56 	}
57 
58 	kunmap_atomic((void *)(unsigned long)(pginfo.metadata - b.pcmd_offset));
59 	kunmap_atomic((void *)(unsigned long)pginfo.contents);
60 
61 	sgx_encl_put_backing(&b, false);
62 
63 	return ret;
64 }
65 
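/*
 * Allocate a free EPC page and use ELDU to load the swapped-out page contents
 * back into it. On success, the VA slot that held the page's version while it
 * was swapped out is released and the enclave page is bound to the new EPC
 * page.
 */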
66 static struct sgx_epc_page *sgx_encl_eldu(struct sgx_encl_page *encl_page,
67 					  struct sgx_epc_page *secs_page)
68 {
69 
70 	unsigned long va_offset = encl_page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK;
71 	struct sgx_encl *encl = encl_page->encl;
72 	struct sgx_epc_page *epc_page;
73 	int ret;
74 
75 	epc_page = sgx_alloc_epc_page(encl_page, false);
76 	if (IS_ERR(epc_page))
77 		return epc_page;
78 
79 	ret = __sgx_encl_eldu(encl_page, epc_page, secs_page);
80 	if (ret) {
81 		sgx_free_epc_page(epc_page);
82 		return ERR_PTR(ret);
83 	}
84 
85 	sgx_free_va_slot(encl_page->va_page, va_offset);
86 	list_move(&encl_page->va_page->list, &encl->va_pages);
87 	encl_page->desc &= ~SGX_ENCL_PAGE_VA_OFFSET_MASK;
88 	encl_page->epc_page = epc_page;
89 
90 	return epc_page;
91 }
92 
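/*
 * Resolve the enclave page backing @addr, verify it against the VMA
 * permissions, and load it (and the SECS page, if needed) back into the EPC
 * when it is not resident. Called with encl->lock held.
 */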
93 static struct sgx_encl_page *sgx_encl_load_page(struct sgx_encl *encl,
94 						unsigned long addr,
95 						unsigned long vm_flags)
96 {
97 	unsigned long vm_prot_bits = vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
98 	struct sgx_epc_page *epc_page;
99 	struct sgx_encl_page *entry;
100 
101 	entry = xa_load(&encl->page_array, PFN_DOWN(addr));
102 	if (!entry)
103 		return ERR_PTR(-EFAULT);
104 
105 	/*
106 	 * Verify that the faulted page has equal or higher build time
107 	 * permissions than the VMA permissions (i.e. the subset of {VM_READ,
108 	 * VM_WRITE, VM_EXEC} in vma->vm_flags).
109 	 */
110 	if ((entry->vm_max_prot_bits & vm_prot_bits) != vm_prot_bits)
111 		return ERR_PTR(-EFAULT);
112 
113 	/* Entry successfully located. */
114 	if (entry->epc_page) {
115 		if (entry->desc & SGX_ENCL_PAGE_BEING_RECLAIMED)
116 			return ERR_PTR(-EBUSY);
117 
118 		return entry;
119 	}
120 
121 	if (!(encl->secs.epc_page)) {
122 		epc_page = sgx_encl_eldu(&encl->secs, NULL);
123 		if (IS_ERR(epc_page))
124 			return ERR_CAST(epc_page);
125 	}
126 
127 	epc_page = sgx_encl_eldu(entry, encl->secs.epc_page);
128 	if (IS_ERR(epc_page))
129 		return ERR_CAST(epc_page);
130 
131 	encl->secs_child_cnt++;
132 	sgx_mark_page_reclaimable(entry->epc_page);
133 
134 	return entry;
135 }
136 
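/*
 * Page fault handler for enclave VMAs: load the faulted enclave page into the
 * EPC if necessary and insert a PTE for its physical address.
 */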
137 static vm_fault_t sgx_vma_fault(struct vm_fault *vmf)
138 {
139 	unsigned long addr = (unsigned long)vmf->address;
140 	struct vm_area_struct *vma = vmf->vma;
141 	struct sgx_encl_page *entry;
142 	unsigned long phys_addr;
143 	struct sgx_encl *encl;
144 	unsigned long pfn;
145 	vm_fault_t ret;
146 
147 	encl = vma->vm_private_data;
148 
149 	/*
150 	 * It's very unlikely but possible that allocating memory for the
151 	 * mm_list entry of a forked process failed in sgx_vma_open(). When
152 	 * this happens, vm_private_data is set to NULL.
153 	 */
154 	if (unlikely(!encl))
155 		return VM_FAULT_SIGBUS;
156 
157 	mutex_lock(&encl->lock);
158 
159 	entry = sgx_encl_load_page(encl, addr, vma->vm_flags);
160 	if (IS_ERR(entry)) {
161 		mutex_unlock(&encl->lock);
162 
163 		if (PTR_ERR(entry) == -EBUSY)
164 			return VM_FAULT_NOPAGE;
165 
166 		return VM_FAULT_SIGBUS;
167 	}
168 
169 	phys_addr = sgx_get_epc_phys_addr(entry->epc_page);
170 
171 	/* Check if another thread got here first to insert the PTE. */
172 	if (!follow_pfn(vma, addr, &pfn)) {
173 		mutex_unlock(&encl->lock);
174 
175 		return VM_FAULT_NOPAGE;
176 	}
177 
178 	ret = vmf_insert_pfn(vma, addr, PFN_DOWN(phys_addr));
179 	if (ret != VM_FAULT_NOPAGE) {
180 		mutex_unlock(&encl->lock);
181 
182 		return VM_FAULT_SIGBUS;
183 	}
184 
185 	sgx_encl_test_and_clear_young(vma->vm_mm, entry);
186 	mutex_unlock(&encl->lock);
187 
188 	return VM_FAULT_NOPAGE;
189 }
190 
191 static void sgx_vma_open(struct vm_area_struct *vma)
192 {
193 	struct sgx_encl *encl = vma->vm_private_data;
194 
195 	/*
196 	 * It's possible but unlikely that vm_private_data is NULL. This can
197 	 * happen in a grandchild process, when sgx_encl_mm_add() failed to
198 	 * allocate memory in this callback for its parent.
199 	 */
200 	if (unlikely(!encl))
201 		return;
202 
203 	if (sgx_encl_mm_add(encl, vma->vm_mm))
204 		vma->vm_private_data = NULL;
205 }
206 
207 
208 /**
209  * sgx_encl_may_map() - Check if a requested VMA mapping is allowed
210  * @encl:		an enclave pointer
211  * @start:		lower bound of the address range, inclusive
212  * @end:		upper bound of the address range, exclusive
213  * @vm_flags:		VMA flags
214  *
215  * Iterate through the enclave pages contained within [@start, @end) to verify
216  * that the requested permissions (the subset of {VM_READ, VM_WRITE, VM_EXEC}
217  * in @vm_flags) do not exceed the build time permissions of any enclave page
218  * within the given address range.
219  *
220  * An enclave creator must declare the strongest permissions that will be
221  * needed for each enclave page. This ensures that mappings have permissions
222  * identical to or weaker than those declared at build time.
223  *
224  * Return: 0 on success, -EACCES otherwise
225  */
226 int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
227 		     unsigned long end, unsigned long vm_flags)
228 {
229 	unsigned long vm_prot_bits = vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
230 	struct sgx_encl_page *page;
231 	unsigned long count = 0;
232 	int ret = 0;
233 
234 	XA_STATE(xas, &encl->page_array, PFN_DOWN(start));
235 
236 	/*
237 	 * Disallow READ_IMPLIES_EXEC tasks as their VMA permissions might
238 	 * conflict with the enclave page permissions.
239 	 */
240 	if (current->personality & READ_IMPLIES_EXEC)
241 		return -EACCES;
242 
243 	mutex_lock(&encl->lock);
244 	xas_lock(&xas);
245 	xas_for_each(&xas, page, PFN_DOWN(end - 1)) {
246 		if (~page->vm_max_prot_bits & vm_prot_bits) {
247 			ret = -EACCES;
248 			break;
249 		}
250 
251 		/* Reschedule on every XA_CHECK_SCHED iteration. */
252 		if (!(++count % XA_CHECK_SCHED)) {
253 			xas_pause(&xas);
254 			xas_unlock(&xas);
255 			mutex_unlock(&encl->lock);
256 
257 			cond_resched();
258 
259 			mutex_lock(&encl->lock);
260 			xas_lock(&xas);
261 		}
262 	}
263 	xas_unlock(&xas);
264 	mutex_unlock(&encl->lock);
265 
266 	return ret;
267 }
268 
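/*
 * mprotect() callback: allow the new flags only if every enclave page in the
 * range was built with at least those permissions.
 */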
269 static int sgx_vma_mprotect(struct vm_area_struct *vma, unsigned long start,
270 			    unsigned long end, unsigned long newflags)
271 {
272 	return sgx_encl_may_map(vma->vm_private_data, start, end, newflags);
273 }
274 
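/* Read one word from an enclave page with the EDBGRD leaf function. */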
275 static int sgx_encl_debug_read(struct sgx_encl *encl, struct sgx_encl_page *page,
276 			       unsigned long addr, void *data)
277 {
278 	unsigned long offset = addr & ~PAGE_MASK;
279 	int ret;
280 
281 
282 	ret = __edbgrd(sgx_get_epc_virt_addr(page->epc_page) + offset, data);
283 	if (ret)
284 		return -EIO;
285 
286 	return 0;
287 }
288 
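/* Write one word to an enclave page with the EDBGWR leaf function. */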
289 static int sgx_encl_debug_write(struct sgx_encl *encl, struct sgx_encl_page *page,
290 				unsigned long addr, void *data)
291 {
292 	unsigned long offset = addr & ~PAGE_MASK;
293 	int ret;
294 
295 	ret = __edbgwr(sgx_get_epc_virt_addr(page->epc_page) + offset, data);
296 	if (ret)
297 		return -EIO;
298 
299 	return 0;
300 }
301 
302 /*
303  * Load an enclave page to EPC if required, and take encl->lock; the lock is released again on failure.
304  */
305 static struct sgx_encl_page *sgx_encl_reserve_page(struct sgx_encl *encl,
306 						   unsigned long addr,
307 						   unsigned long vm_flags)
308 {
309 	struct sgx_encl_page *entry;
310 
311 	for ( ; ; ) {
312 		mutex_lock(&encl->lock);
313 
314 		entry = sgx_encl_load_page(encl, addr, vm_flags);
315 		if (PTR_ERR(entry) != -EBUSY)
316 			break;
317 
318 		mutex_unlock(&encl->lock);
319 	}
320 
321 	if (IS_ERR(entry))
322 		mutex_unlock(&encl->lock);
323 
324 	return entry;
325 }
326 
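/*
 * vm_ops->access handler used by ptrace(): permitted only for debug enclaves,
 * with reads and writes performed one word at a time via EDBGRD/EDBGWR.
 */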
327 static int sgx_vma_access(struct vm_area_struct *vma, unsigned long addr,
328 			  void *buf, int len, int write)
329 {
330 	struct sgx_encl *encl = vma->vm_private_data;
331 	struct sgx_encl_page *entry = NULL;
332 	char data[sizeof(unsigned long)];
333 	unsigned long align;
334 	int offset;
335 	int cnt;
336 	int ret = 0;
337 	int i;
338 
339 	/*
340 	 * If the process was forked and sgx_vma_open() failed to add its mm to
341 	 * the enclave, the VMA is still there but vm_private_data is NULL.
342 	 */
343 	if (!encl)
344 		return -EFAULT;
345 
346 	if (!test_bit(SGX_ENCL_DEBUG, &encl->flags))
347 		return -EFAULT;
348 
349 	for (i = 0; i < len; i += cnt) {
350 		entry = sgx_encl_reserve_page(encl, (addr + i) & PAGE_MASK,
351 					      vma->vm_flags);
352 		if (IS_ERR(entry)) {
353 			ret = PTR_ERR(entry);
354 			break;
355 		}
356 
357 		align = ALIGN_DOWN(addr + i, sizeof(unsigned long));
358 		offset = (addr + i) & (sizeof(unsigned long) - 1);
359 		cnt = sizeof(unsigned long) - offset;
360 		cnt = min(cnt, len - i);
361 
362 		ret = sgx_encl_debug_read(encl, entry, align, data);
363 		if (ret)
364 			goto out;
365 
366 		if (write) {
367 			memcpy(data + offset, buf + i, cnt);
368 			ret = sgx_encl_debug_write(encl, entry, align, data);
369 			if (ret)
370 				goto out;
371 		} else {
372 			memcpy(buf + i, data + offset, cnt);
373 		}
374 
375 out:
376 		mutex_unlock(&encl->lock);
377 
378 		if (ret)
379 			break;
380 	}
381 
382 	return ret < 0 ? ret : i;
383 }
384 
385 const struct vm_operations_struct sgx_vm_ops = {
386 	.fault = sgx_vma_fault,
387 	.mprotect = sgx_vma_mprotect,
388 	.open = sgx_vma_open,
389 	.access = sgx_vma_access,
390 };
391 
392 /**
393  * sgx_encl_release() - Destroy an enclave instance
394  * @ref:	address of a kref inside &sgx_encl
395  *
396  * Used together with kref_put(). Frees all the resources associated with the
397  * enclave and the instance itself.
398  */
399 void sgx_encl_release(struct kref *ref)
400 {
401 	struct sgx_encl *encl = container_of(ref, struct sgx_encl, refcount);
402 	struct sgx_va_page *va_page;
403 	struct sgx_encl_page *entry;
404 	unsigned long index;
405 
406 	xa_for_each(&encl->page_array, index, entry) {
407 		if (entry->epc_page) {
408 			/*
409 			 * The page and its encl->page_array entry cannot be freed
410 			 * if the page is being held by the reclaimer.
411 			 */
412 			if (sgx_unmark_page_reclaimable(entry->epc_page))
413 				continue;
414 
415 			sgx_free_epc_page(entry->epc_page);
416 			encl->secs_child_cnt--;
417 			entry->epc_page = NULL;
418 		}
419 
420 		kfree(entry);
421 	}
422 
423 	xa_destroy(&encl->page_array);
424 
425 	if (!encl->secs_child_cnt && encl->secs.epc_page) {
426 		sgx_free_epc_page(encl->secs.epc_page);
427 		encl->secs.epc_page = NULL;
428 	}
429 
430 	while (!list_empty(&encl->va_pages)) {
431 		va_page = list_first_entry(&encl->va_pages, struct sgx_va_page,
432 					   list);
433 		list_del(&va_page->list);
434 		sgx_free_epc_page(va_page->epc_page);
435 		kfree(va_page);
436 	}
437 
438 	if (encl->backing)
439 		fput(encl->backing);
440 
441 	cleanup_srcu_struct(&encl->srcu);
442 
443 	WARN_ON_ONCE(!list_empty(&encl->mm_list));
444 
445 	/* Detect EPC page leaks. */
446 	WARN_ON_ONCE(encl->secs_child_cnt);
447 	WARN_ON_ONCE(encl->secs.epc_page);
448 
449 	kfree(encl);
450 }
451 
452 /*
453  * 'mm' is exiting and no longer needs mmu notifications.
454  */
455 static void sgx_mmu_notifier_release(struct mmu_notifier *mn,
456 				     struct mm_struct *mm)
457 {
458 	struct sgx_encl_mm *encl_mm = container_of(mn, struct sgx_encl_mm, mmu_notifier);
459 	struct sgx_encl_mm *tmp = NULL;
460 
461 	/*
462 	 * The enclave itself can remove encl_mm.  Note, objects can't be moved
463 	 * off an RCU protected list, but deletion is ok.
464 	 */
465 	spin_lock(&encl_mm->encl->mm_lock);
466 	list_for_each_entry(tmp, &encl_mm->encl->mm_list, list) {
467 		if (tmp == encl_mm) {
468 			list_del_rcu(&encl_mm->list);
469 			break;
470 		}
471 	}
472 	spin_unlock(&encl_mm->encl->mm_lock);
473 
474 	if (tmp == encl_mm) {
475 		synchronize_srcu(&encl_mm->encl->srcu);
476 		mmu_notifier_put(mn);
477 	}
478 }
479 
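/*
 * Deferred release of an encl_mm: invoked via mmu_notifier_put() once it is
 * safe to free the notifier.
 */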
480 static void sgx_mmu_notifier_free(struct mmu_notifier *mn)
481 {
482 	struct sgx_encl_mm *encl_mm = container_of(mn, struct sgx_encl_mm, mmu_notifier);
483 
484 	kfree(encl_mm);
485 }
486 
487 static const struct mmu_notifier_ops sgx_mmu_notifier_ops = {
488 	.release		= sgx_mmu_notifier_release,
489 	.free_notifier		= sgx_mmu_notifier_free,
490 };
491 
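/*
 * Return the encl_mm tracking @mm on the enclave's mm_list, or NULL if @mm has
 * not been added.
 */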
492 static struct sgx_encl_mm *sgx_encl_find_mm(struct sgx_encl *encl,
493 					    struct mm_struct *mm)
494 {
495 	struct sgx_encl_mm *encl_mm = NULL;
496 	struct sgx_encl_mm *tmp;
497 	int idx;
498 
499 	idx = srcu_read_lock(&encl->srcu);
500 
501 	list_for_each_entry_rcu(tmp, &encl->mm_list, list) {
502 		if (tmp->mm == mm) {
503 			encl_mm = tmp;
504 			break;
505 		}
506 	}
507 
508 	srcu_read_unlock(&encl->srcu, idx);
509 
510 	return encl_mm;
511 }
512 
513 int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm)
514 {
515 	struct sgx_encl_mm *encl_mm;
516 	int ret;
517 
518 	/*
519 	 * Even though a single enclave may be mapped into an mm more than once,
520 	 * each 'mm' only appears once on encl->mm_list. This is guaranteed by
521 	 * holding the mm's mmap lock for write before an mm can be added to or
522 	 * removed from an encl->mm_list.
523 	 */
524 	mmap_assert_write_locked(mm);
525 
526 	/*
527 	 * It's possible that an entry already exists in the mm_list, because it
528 	 * is removed only on VFS release or process exit.
529 	 */
530 	if (sgx_encl_find_mm(encl, mm))
531 		return 0;
532 
533 	encl_mm = kzalloc(sizeof(*encl_mm), GFP_KERNEL);
534 	if (!encl_mm)
535 		return -ENOMEM;
536 
537 	encl_mm->encl = encl;
538 	encl_mm->mm = mm;
539 	encl_mm->mmu_notifier.ops = &sgx_mmu_notifier_ops;
540 
541 	ret = __mmu_notifier_register(&encl_mm->mmu_notifier, mm);
542 	if (ret) {
543 		kfree(encl_mm);
544 		return ret;
545 	}
546 
547 	spin_lock(&encl->mm_lock);
548 	list_add_rcu(&encl_mm->list, &encl->mm_list);
549 	/* Pairs with smp_rmb() in sgx_reclaimer_block(). */
550 	smp_wmb();
551 	encl->mm_list_version++;
552 	spin_unlock(&encl->mm_lock);
553 
554 	return 0;
555 }
556 
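/* Pin a page of the enclave's shmem backing storage, reading it in if needed. */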
557 static struct page *sgx_encl_get_backing_page(struct sgx_encl *encl,
558 					      pgoff_t index)
559 {
560 	struct inode *inode = encl->backing->f_path.dentry->d_inode;
561 	struct address_space *mapping = inode->i_mapping;
562 	gfp_t gfpmask = mapping_gfp_mask(mapping);
563 
564 	return shmem_read_mapping_page_gfp(mapping, index, gfpmask);
565 }
566 
567 /**
568  * sgx_encl_get_backing() - Pin the backing storage
569  * @encl:	an enclave pointer
570  * @page_index:	enclave page index
571  * @backing:	data for accessing backing storage for the page
572  *
573  * Pin the backing storage pages for storing the encrypted contents and Paging
574  * Crypto MetaData (PCMD) of an enclave page.
575  *
576  * Return:
577  *   0 on success,
578  *   -errno otherwise.
579  */
580 int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
581 			 struct sgx_backing *backing)
582 {
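	/*
	 * One backing page holds the PCMD structures for 32 enclave pages
	 * (hence page_index >> 5). The PCMD area starts right after the SECS
	 * backing page at index PFN_DOWN(encl->size).
	 */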
583 	pgoff_t pcmd_index = PFN_DOWN(encl->size) + 1 + (page_index >> 5);
584 	struct page *contents;
585 	struct page *pcmd;
586 
587 	contents = sgx_encl_get_backing_page(encl, page_index);
588 	if (IS_ERR(contents))
589 		return PTR_ERR(contents);
590 
591 	pcmd = sgx_encl_get_backing_page(encl, pcmd_index);
592 	if (IS_ERR(pcmd)) {
593 		put_page(contents);
594 		return PTR_ERR(pcmd);
595 	}
596 
597 	backing->page_index = page_index;
598 	backing->contents = contents;
599 	backing->pcmd = pcmd;
600 	backing->pcmd_offset =
601 		(page_index & (PAGE_SIZE / sizeof(struct sgx_pcmd) - 1)) *
602 		sizeof(struct sgx_pcmd);
603 
604 	return 0;
605 }
606 
607 /**
608  * sgx_encl_put_backing() - Unpin the backing storage
609  * @backing:	data for accessing backing storage for the page
610  * @do_write:	mark pages dirty
611  */
612 void sgx_encl_put_backing(struct sgx_backing *backing, bool do_write)
613 {
614 	if (do_write) {
615 		set_page_dirty(backing->pcmd);
616 		set_page_dirty(backing->contents);
617 	}
618 
619 	put_page(backing->pcmd);
620 	put_page(backing->contents);
621 }
622 
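/*
 * apply_to_page_range() callback: test the Accessed bit of a single PTE and
 * clear it if it was set.
 */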
623 static int sgx_encl_test_and_clear_young_cb(pte_t *ptep, unsigned long addr,
624 					    void *data)
625 {
626 	pte_t pte;
627 	int ret;
628 
629 	ret = pte_young(*ptep);
630 	if (ret) {
631 		pte = pte_mkold(*ptep);
632 		set_pte_at((struct mm_struct *)data, addr, ptep, pte);
633 	}
634 
635 	return ret;
636 }
637 
638 /**
639  * sgx_encl_test_and_clear_young() - Test and reset the accessed bit
640  * @mm:		mm_struct that is checked
641  * @page:	enclave page to be tested for recent access
642  *
643  * Checks the Access (A) bit from the PTE corresponding to the enclave page and
644  * clears it.
645  *
646  * Return: 1 if the page has been recently accessed and 0 if not.
647  */
648 int sgx_encl_test_and_clear_young(struct mm_struct *mm,
649 				  struct sgx_encl_page *page)
650 {
651 	unsigned long addr = page->desc & PAGE_MASK;
652 	struct sgx_encl *encl = page->encl;
653 	struct vm_area_struct *vma;
654 	int ret;
655 
656 	ret = sgx_encl_find(mm, addr, &vma);
657 	if (ret)
658 		return 0;
659 
660 	if (encl != vma->vm_private_data)
661 		return 0;
662 
663 	ret = apply_to_page_range(vma->vm_mm, addr, PAGE_SIZE,
664 				  sgx_encl_test_and_clear_young_cb, vma->vm_mm);
665 	if (ret < 0)
666 		return 0;
667 
668 	return ret;
669 }
670 
671 /**
672  * sgx_alloc_va_page() - Allocate a Version Array (VA) page
673  *
674  * Allocate a free EPC page and convert it to a Version Array (VA) page.
675  *
676  * Return:
677  *   a VA page,
678  *   -errno otherwise
679  */
680 struct sgx_epc_page *sgx_alloc_va_page(void)
681 {
682 	struct sgx_epc_page *epc_page;
683 	int ret;
684 
685 	epc_page = sgx_alloc_epc_page(NULL, true);
686 	if (IS_ERR(epc_page))
687 		return ERR_CAST(epc_page);
688 
689 	ret = __epa(sgx_get_epc_virt_addr(epc_page));
690 	if (ret) {
691 		WARN_ONCE(1, "EPA returned %d (0x%x)", ret, ret);
692 		sgx_free_epc_page(epc_page);
693 		return ERR_PTR(-EFAULT);
694 	}
695 
696 	return epc_page;
697 }
698 
699 /**
700  * sgx_alloc_va_slot - allocate a VA slot
701  * @va_page:	a &struct sgx_va_page instance
702  *
703  * Allocates a slot from a &struct sgx_va_page instance.
704  *
705  * Return: offset of the slot inside the VA page
706  */
707 unsigned int sgx_alloc_va_slot(struct sgx_va_page *va_page)
708 {
709 	int slot = find_first_zero_bit(va_page->slots, SGX_VA_SLOT_COUNT);
710 
711 	if (slot < SGX_VA_SLOT_COUNT)
712 		set_bit(slot, va_page->slots);
713 
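	/* Each VA slot is 8 bytes, so convert the slot index into a byte offset. */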
714 	return slot << 3;
715 }
716 
717 /**
718  * sgx_free_va_slot - free a VA slot
719  * @va_page:	a &struct sgx_va_page instance
720  * @offset:	offset of the slot inside the VA page
721  *
722  * Frees a slot from a &struct sgx_va_page instance.
723  */
724 void sgx_free_va_slot(struct sgx_va_page *va_page, unsigned int offset)
725 {
726 	clear_bit(offset >> 3, va_page->slots);
727 }
728 
729 /**
730  * sgx_va_page_full - is the VA page full?
731  * @va_page:	a &struct sgx_va_page instance
732  *
733  * Return: true if all slots have been taken
734  */
735 bool sgx_va_page_full(struct sgx_va_page *va_page)
736 {
737 	int slot = find_first_zero_bit(va_page->slots, SGX_VA_SLOT_COUNT);
738 
739 	return slot == SGX_VA_SLOT_COUNT;
740 }
741