/* SPDX-License-Identifier: GPL-2.0 */
/**
 * Copyright(c) 2016-20 Intel Corporation.
 *
 * Contains the software defined data structures for enclaves.
 */
#ifndef _X86_ENCL_H
#define _X86_ENCL_H

#include <linux/cpumask.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mm_types.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/srcu.h>
#include <linux/workqueue.h>
#include <linux/xarray.h>
#include "sgx.h"

/* 'desc' bits holding the offset in the VA (version array) page. */
#define SGX_ENCL_PAGE_VA_OFFSET_MASK	GENMASK_ULL(11, 3)

/* 'desc' bit marking that the page is being reclaimed. */
#define SGX_ENCL_PAGE_BEING_RECLAIMED	BIT(3)

struct sgx_encl_page {
	unsigned long desc;
	unsigned long vm_max_prot_bits:8;
	enum sgx_page_type type:16;
	struct sgx_epc_page *epc_page;
	struct sgx_encl *encl;
	struct sgx_va_page *va_page;
};
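
/*
 * Illustrative sketch, not part of the driver API: 'desc' packs the
 * page's page-aligned enclave address into the high bits, leaving bits
 * 11:0 for the reclaim flag and VA-slot offset defined above. The
 * helpers below are hypothetical names showing how such a layout would
 * be decoded; PAGE_MASK is assumed visible through the includes.
 */
static inline unsigned long sgx_example_encl_page_addr(struct sgx_encl_page *page)
{
	/* Drop the low flag/offset bits to recover the enclave address. */
	return page->desc & PAGE_MASK;
}

static inline unsigned long sgx_example_encl_page_va_offset(struct sgx_encl_page *page)
{
	/* Byte offset of this page's 8-byte slot within its VA page. */
	return page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK;
}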

enum sgx_encl_flags {
	SGX_ENCL_IOCTL = BIT(0),
	SGX_ENCL_DEBUG = BIT(1),
	SGX_ENCL_CREATED = BIT(2),
	SGX_ENCL_INITIALIZED = BIT(3),
};
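
/*
 * Illustrative sketch with a hypothetical helper: the flags above are
 * bit masks (not bit numbers), so a snapshot of sgx_encl.flags can be
 * tested with plain mask arithmetic.
 */
static inline bool sgx_example_encl_ready(unsigned long flags)
{
	/* Usable for entry only once both ECREATE and EINIT completed. */
	return (flags & SGX_ENCL_CREATED) && (flags & SGX_ENCL_INITIALIZED);
}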

struct sgx_encl_mm {
	struct sgx_encl *encl;
	struct mm_struct *mm;
	struct list_head list;
	struct mmu_notifier mmu_notifier;
};

struct sgx_encl {
	unsigned long base;
	unsigned long size;
	unsigned long flags;
	unsigned int page_cnt;
	unsigned int secs_child_cnt;
	struct mutex lock;
	struct xarray page_array;
	struct sgx_encl_page secs;
	unsigned long attributes;
	unsigned long attributes_mask;

	cpumask_t cpumask;
	struct file *backing;
	struct kref refcount;
	struct list_head va_pages;
	unsigned long mm_list_version;
	struct list_head mm_list;
	spinlock_t mm_lock;
	struct srcu_struct srcu;
};
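
/*
 * Minimal sketch with a hypothetical helper: mm_list is meant to be
 * walked under the enclave's SRCU read lock, with mm_lock taken only by
 * writers. Assumes list_for_each_entry_rcu() is visible through the
 * existing includes.
 */
static inline int sgx_example_count_mms(struct sgx_encl *encl)
{
	struct sgx_encl_mm *encl_mm;
	int cnt = 0;
	int idx;

	idx = srcu_read_lock(&encl->srcu);
	list_for_each_entry_rcu(encl_mm, &encl->mm_list, list)
		cnt++;
	srcu_read_unlock(&encl->srcu, idx);

	return cnt;
}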

#define SGX_VA_SLOT_COUNT 512

struct sgx_va_page {
	struct sgx_epc_page *epc_page;
	DECLARE_BITMAP(slots, SGX_VA_SLOT_COUNT);
	struct list_head list;
};
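
/*
 * Illustrative sketch with a hypothetical helper: each of the 512 slots
 * tracks one 8-byte version counter, so a free slot found in the bitmap
 * maps to a byte offset that fits SGX_ENCL_PAGE_VA_OFFSET_MASK exactly.
 * Assumes find_first_zero_bit() and -ENOSPC are visible through the
 * existing includes.
 */
static inline int sgx_example_peek_free_va_slot(struct sgx_va_page *va_page)
{
	unsigned int slot;

	slot = find_first_zero_bit(va_page->slots, SGX_VA_SLOT_COUNT);
	if (slot >= SGX_VA_SLOT_COUNT)
		return -ENOSPC;

	/* 8 bytes per slot: slot 0 -> offset 0, slot 1 -> offset 8, ... */
	return slot << 3;
}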

struct sgx_backing {
	struct page *contents;
	struct page *pcmd;
	unsigned long pcmd_offset;
};
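
/*
 * Illustrative sketch with a hypothetical helper, assuming the
 * architectural 128-byte struct sgx_pcmd from asm/sgx.h is visible via
 * "sgx.h": PCMD entries for consecutive enclave pages share backing
 * pages, and pcmd_offset is the entry's byte offset within its page.
 */
static inline unsigned long sgx_example_pcmd_offset(unsigned long page_index)
{
	return (page_index * sizeof(struct sgx_pcmd)) & (PAGE_SIZE - 1);
}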

extern const struct vm_operations_struct sgx_vm_ops;

static inline int sgx_encl_find(struct mm_struct *mm, unsigned long addr,
				struct vm_area_struct **vma)
{
	struct vm_area_struct *result;

	result = vma_lookup(mm, addr);
	if (!result || result->vm_ops != &sgx_vm_ops)
		return -EINVAL;

	*vma = result;

	return 0;
}
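
/*
 * Usage sketch with a hypothetical caller: sgx_encl_find() only walks
 * the VMA tree, so mmap_lock must be held for read across the lookup
 * and any use of the returned VMA. Assumes mmap_read_lock() is visible
 * through the existing includes, and relies on the driver storing the
 * enclave in vm_private_data at mmap time.
 */
static inline struct sgx_encl *sgx_example_encl_at(struct mm_struct *mm,
						   unsigned long addr)
{
	struct vm_area_struct *vma;
	struct sgx_encl *encl = NULL;

	mmap_read_lock(mm);
	if (!sgx_encl_find(mm, addr, &vma))
		encl = vma->vm_private_data;
	mmap_read_unlock(mm);

	/* A real caller would pin the enclave (kref) before dropping the lock. */
	return encl;
}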

int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
		     unsigned long end, unsigned long vm_flags);

bool current_is_ksgxd(void);
void sgx_encl_release(struct kref *ref);
int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm);
const cpumask_t *sgx_encl_cpumask(struct sgx_encl *encl);
int sgx_encl_alloc_backing(struct sgx_encl *encl, unsigned long page_index,
			   struct sgx_backing *backing);
void sgx_encl_put_backing(struct sgx_backing *backing);
int sgx_encl_test_and_clear_young(struct mm_struct *mm,
				  struct sgx_encl_page *page);
struct sgx_encl_page *sgx_encl_page_alloc(struct sgx_encl *encl,
					  unsigned long offset,
					  u64 secinfo_flags);
void sgx_zap_enclave_ptes(struct sgx_encl *encl, unsigned long addr);
struct sgx_epc_page *sgx_alloc_va_page(bool reclaim);
unsigned int sgx_alloc_va_slot(struct sgx_va_page *va_page);
void sgx_free_va_slot(struct sgx_va_page *va_page, unsigned int offset);
bool sgx_va_page_full(struct sgx_va_page *va_page);
void sgx_encl_free_epc_page(struct sgx_epc_page *page);
struct sgx_encl_page *sgx_encl_load_page(struct sgx_encl *encl,
					 unsigned long addr);
struct sgx_va_page *sgx_encl_grow(struct sgx_encl *encl, bool reclaim);
void sgx_encl_shrink(struct sgx_encl *encl, struct sgx_va_page *va_page);

#endif /* _X86_ENCL_H */