xref: /linux/tools/testing/selftests/sgx/main.c (revision 33c5aac3bf32c3ef120ad6d2eb5c65ab64a5fec4)
1 // SPDX-License-Identifier: GPL-2.0
2 /*  Copyright(c) 2016-20 Intel Corporation. */
3 
4 #include <cpuid.h>
5 #include <elf.h>
6 #include <errno.h>
7 #include <fcntl.h>
8 #include <stdbool.h>
9 #include <stdio.h>
10 #include <stdint.h>
11 #include <stdlib.h>
12 #include <string.h>
13 #include <unistd.h>
14 #include <sys/ioctl.h>
15 #include <sys/mman.h>
16 #include <sys/stat.h>
17 #include <sys/time.h>
18 #include <sys/types.h>
19 #include <sys/auxv.h>
20 #include "defines.h"
21 #include "../kselftest_harness.h"
22 #include "main.h"
23 
/* Test payload values written into and read back from enclave memory. */
static const uint64_t MAGIC = 0x1122334455667788ULL;
static const uint64_t MAGIC2 = 0x8877665544332211ULL;
/* Entry point of __vdso_sgx_enter_enclave(), resolved in setup_test_encl(). */
vdso_sgx_enter_enclave_t vdso_sgx_enter_enclave;
27 
/*
 * Security Information (SECINFO) data structure needed by a few SGX
 * instructions (eg. ENCLU[EACCEPT] and ENCLU[EMODPE]) holds meta-data
 * about an enclave page. &enum sgx_secinfo_page_state specifies the
 * secinfo flags used for page state.
 */
enum sgx_secinfo_page_state {
	SGX_SECINFO_PENDING = (1 << 3),		/* EAUG'd page awaiting EACCEPT */
	SGX_SECINFO_MODIFIED = (1 << 4),	/* assumed EMODT-related; unused in this chunk */
	SGX_SECINFO_PR = (1 << 5),		/* permission restriction (EMODPR) pending EACCEPT */
};
39 
/* Handles into the vDSO's dynamic symbol table for SysV-hash lookups. */
struct vdso_symtab {
	Elf64_Sym *elf_symtab;		/* DT_SYMTAB: symbol entries */
	const char *elf_symstrtab;	/* DT_STRTAB: symbol name strings */
	Elf64_Word *elf_hashtab;	/* DT_HASH: [nbucket, nchain, buckets..., chains...] */
};
45 
/*
 * Locate the dynamic section of the vDSO image mapped at @addr by
 * scanning its program header table for PT_DYNAMIC.
 *
 * Return: pointer to the dynamic table, or NULL if none exists.
 */
static Elf64_Dyn *vdso_get_dyntab(void *addr)
{
	Elf64_Ehdr *ehdr = addr;
	Elf64_Phdr *phdr = addr + ehdr->e_phoff;
	int idx;

	for (idx = 0; idx < ehdr->e_phnum; idx++) {
		if (phdr[idx].p_type != PT_DYNAMIC)
			continue;

		return addr + phdr[idx].p_offset;
	}

	return NULL;
}
58 
/*
 * Find the dynamic entry with tag @tag in @dyntab and translate its
 * d_ptr into a pointer relative to the image base @addr.
 *
 * Return: translated pointer, or NULL if @tag is not present.
 */
static void *vdso_get_dyn(void *addr, Elf64_Dyn *dyntab, Elf64_Sxword tag)
{
	Elf64_Dyn *entry;

	for (entry = dyntab; entry->d_tag != DT_NULL; entry++) {
		if (entry->d_tag == tag)
			return addr + entry->d_un.d_ptr;
	}

	return NULL;
}
69 
70 static bool vdso_get_symtab(void *addr, struct vdso_symtab *symtab)
71 {
72 	Elf64_Dyn *dyntab = vdso_get_dyntab(addr);
73 
74 	symtab->elf_symtab = vdso_get_dyn(addr, dyntab, DT_SYMTAB);
75 	if (!symtab->elf_symtab)
76 		return false;
77 
78 	symtab->elf_symstrtab = vdso_get_dyn(addr, dyntab, DT_STRTAB);
79 	if (!symtab->elf_symstrtab)
80 		return false;
81 
82 	symtab->elf_hashtab = vdso_get_dyn(addr, dyntab, DT_HASH);
83 	if (!symtab->elf_hashtab)
84 		return false;
85 
86 	return true;
87 }
88 
89 static inline int sgx2_supported(void)
90 {
91 	unsigned int eax, ebx, ecx, edx;
92 
93 	__cpuid_count(SGX_CPUID, 0x0, eax, ebx, ecx, edx);
94 
95 	return eax & 0x2;
96 }
97 
/*
 * Classic System V ABI ELF hash over a symbol name, used to index the
 * vDSO's DT_HASH bucket array.
 */
static unsigned long elf_sym_hash(const char *name)
{
	unsigned long hash = 0;

	for (; *name; name++) {
		unsigned long top;

		hash = (hash << 4) + *name;
		top = hash & 0xf0000000;
		if (top)
			hash ^= top >> 24;
		/* Clear the folded top nibble (no-op when top == 0). */
		hash &= ~top;
	}

	return hash;
}
114 
115 static Elf64_Sym *vdso_symtab_get(struct vdso_symtab *symtab, const char *name)
116 {
117 	Elf64_Word bucketnum = symtab->elf_hashtab[0];
118 	Elf64_Word *buckettab = &symtab->elf_hashtab[2];
119 	Elf64_Word *chaintab = &symtab->elf_hashtab[2 + bucketnum];
120 	Elf64_Sym *sym;
121 	Elf64_Word i;
122 
123 	for (i = buckettab[elf_sym_hash(name) % bucketnum]; i != STN_UNDEF;
124 	     i = chaintab[i]) {
125 		sym = &symtab->elf_symtab[i];
126 		if (!strcmp(name, &symtab->elf_symstrtab[sym->st_name]))
127 			return sym;
128 	}
129 
130 	return NULL;
131 }
132 
133 /*
134  * Return the offset in the enclave where the TCS segment can be found.
135  * The first RW segment loaded is the TCS.
136  */
137 static off_t encl_get_tcs_offset(struct encl *encl)
138 {
139 	int i;
140 
141 	for (i = 0; i < encl->nr_segments; i++) {
142 		struct encl_segment *seg = &encl->segment_tbl[i];
143 
144 		if (i == 0 && seg->prot == (PROT_READ | PROT_WRITE))
145 			return seg->offset;
146 	}
147 
148 	return -1;
149 }
150 
151 /*
152  * Return the offset in the enclave where the data segment can be found.
153  * The first RW segment loaded is the TCS, skip that to get info on the
154  * data segment.
155  */
156 static off_t encl_get_data_offset(struct encl *encl)
157 {
158 	int i;
159 
160 	for (i = 1; i < encl->nr_segments; i++) {
161 		struct encl_segment *seg = &encl->segment_tbl[i];
162 
163 		if (seg->prot == (PROT_READ | PROT_WRITE))
164 			return seg->offset;
165 	}
166 
167 	return -1;
168 }
169 
/* Per-test fixture: the loaded enclave and the vDSO run structure. */
FIXTURE(enclave) {
	struct encl encl;
	struct sgx_enclave_run run;
};
174 
/*
 * Load, measure and build "test_encl.elf" into an initialized enclave
 * with @heap_size bytes of heap, map its segments into the enclave
 * address range, and resolve __vdso_sgx_enter_enclave() from the vDSO.
 *
 * On failure the segment table and any /dev/sgx_enclave mappings are
 * logged for debugging, the enclave is deleted and false is returned.
 */
static bool setup_test_encl(unsigned long heap_size, struct encl *encl,
			    struct __test_metadata *_metadata)
{
	Elf64_Sym *sgx_enter_enclave_sym = NULL;
	struct vdso_symtab symtab;
	struct encl_segment *seg;
	char maps_line[256];
	FILE *maps_file;
	unsigned int i;
	void *addr;

	if (!encl_load("test_encl.elf", encl, heap_size)) {
		encl_delete(encl);
		TH_LOG("Failed to load the test enclave.");
		return false;
	}

	if (!encl_measure(encl))
		goto err;

	if (!encl_build(encl))
		goto err;

	/*
	 * Map each segment with its own permissions; this is the only
	 * mapping step an enclave consumer must do itself.
	 */
	for (i = 0; i < encl->nr_segments; i++) {
		struct encl_segment *seg = &encl->segment_tbl[i];

		addr = mmap((void *)encl->encl_base + seg->offset, seg->size,
			    seg->prot, MAP_SHARED | MAP_FIXED, encl->fd, 0);
		EXPECT_NE(addr, MAP_FAILED);
		if (addr == MAP_FAILED)
			goto err;
	}

	/* Get vDSO base address */
	addr = (void *)getauxval(AT_SYSINFO_EHDR);
	if (!addr)
		goto err;

	if (!vdso_get_symtab(addr, &symtab))
		goto err;

	sgx_enter_enclave_sym = vdso_symtab_get(&symtab, "__vdso_sgx_enter_enclave");
	if (!sgx_enter_enclave_sym)
		goto err;

	vdso_sgx_enter_enclave = addr + sgx_enter_enclave_sym->st_value;

	return true;

err:
	/* Dump the segment table to aid debugging. */
	for (i = 0; i < encl->nr_segments; i++) {
		seg = &encl->segment_tbl[i];

		TH_LOG("0x%016lx 0x%016lx 0x%02x", seg->offset, seg->size, seg->prot);
	}

	/* Log any enclave mappings that did get established. */
	maps_file = fopen("/proc/self/maps", "r");
	if (maps_file != NULL)  {
		while (fgets(maps_line, sizeof(maps_line), maps_file) != NULL) {
			/* Strip trailing newline; fgets() never returns "". */
			maps_line[strlen(maps_line) - 1] = '\0';

			if (strstr(maps_line, "/dev/sgx_enclave"))
				TH_LOG("%s", maps_line);
		}

		fclose(maps_file);
	}

	TH_LOG("Failed to initialize the test enclave.");

	encl_delete(encl);

	return false;
}
252 
/* No common setup: each test builds its own enclave via setup_test_encl(). */
FIXTURE_SETUP(enclave)
{
}
256 
/* Tear down the per-test enclave, releasing its mappings and fd. */
FIXTURE_TEARDOWN(enclave)
{
	encl_delete(&self->encl);
}
261 
/*
 * Enter the enclave with EENTER to execute operation @op.  When
 * @clobbered is true, go through the vDSO wrapper directly; otherwise
 * use the sgx_enter_enclave() wrapper (see main.h), which presumably
 * preserves untrusted state — confirm against its definition.
 */
#define ENCL_CALL(op, run, clobbered) \
	({ \
		int ret; \
		if ((clobbered)) \
			ret = vdso_sgx_enter_enclave((unsigned long)(op), 0, 0, \
						     EENTER, 0, 0, (run)); \
		else \
			ret = sgx_enter_enclave((void *)(op), NULL, 0, EENTER, NULL, NULL, \
						(run)); \
		ret; \
	})
273 
/*
 * Expect a clean EEXIT from the enclave; on any other leaf (e.g. an AEX
 * leaving ERESUME) log the recorded exception details.
 */
#define EXPECT_EEXIT(run) \
	do { \
		EXPECT_EQ((run)->function, EEXIT); \
		if ((run)->function != EEXIT) \
			TH_LOG("0x%02x 0x%02x 0x%016llx", (run)->exception_vector, \
			       (run)->exception_error_code, (run)->exception_addr); \
	} while (0)
281 
/*
 * Round-trip MAGIC through the enclave buffer using the non-clobbering
 * entry path (clobbered == false in ENCL_CALL).
 */
TEST_F(enclave, unclobbered_vdso)
{
	struct encl_op_get_from_buf get_op;
	struct encl_op_put_to_buf put_op;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/* Write MAGIC into the enclave's internal buffer. */
	put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_op, &self->run, false), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	/* Read it back and verify the round trip. */
	get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_op, &self->run, false), 0);

	EXPECT_EQ(get_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);
}
309 
/*
 * A section metric is concatenated in a way that @low bits 12-31 define the
 * bits 12-31 of the metric and @high bits 0-19 define the bits 32-51 of the
 * metric.
 */
static unsigned long sgx_calc_section_metric(unsigned int low,
					     unsigned int high)
{
	unsigned long metric_low = low & GENMASK_ULL(31, 12);
	unsigned long metric_high = (high & GENMASK_ULL(19, 0)) << 32;

	return metric_low + metric_high;
}
321 
322 /*
323  * Sum total available physical SGX memory across all EPC sections
324  *
325  * Return: total available physical SGX memory available on system
326  */
327 static unsigned long get_total_epc_mem(void)
328 {
329 	unsigned int eax, ebx, ecx, edx;
330 	unsigned long total_size = 0;
331 	unsigned int type;
332 	int section = 0;
333 
334 	while (true) {
335 		__cpuid_count(SGX_CPUID, section + SGX_CPUID_EPC, eax, ebx, ecx, edx);
336 
337 		type = eax & SGX_CPUID_EPC_MASK;
338 		if (type == SGX_CPUID_EPC_INVALID)
339 			break;
340 
341 		if (type != SGX_CPUID_EPC_SECTION)
342 			break;
343 
344 		total_size += sgx_calc_section_metric(ecx, edx);
345 
346 		section++;
347 	}
348 
349 	return total_size;
350 }
351 
/*
 * Same round trip as unclobbered_vdso, but with an enclave heap sized to
 * the system's entire EPC, forcing EPC oversubscription (reclaim).
 */
TEST_F(enclave, unclobbered_vdso_oversubscribed)
{
	struct encl_op_get_from_buf get_op;
	struct encl_op_put_to_buf put_op;
	unsigned long total_mem;

	total_mem = get_total_epc_mem();
	ASSERT_NE(total_mem, 0);
	ASSERT_TRUE(setup_test_encl(total_mem, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_op, &self->run, false), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_op, &self->run, false), 0);

	EXPECT_EQ(get_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

}
383 
/*
 * Round-trip MAGIC through the enclave buffer using the direct vDSO
 * entry path (clobbered == true in ENCL_CALL).
 */
TEST_F(enclave, clobbered_vdso)
{
	struct encl_op_get_from_buf get_op;
	struct encl_op_put_to_buf put_op;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_op, &self->run, true), 0);

	EXPECT_EQ(get_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);
}
411 
412 static int test_handler(long rdi, long rsi, long rdx, long ursp, long r8, long r9,
413 			struct sgx_enclave_run *run)
414 {
415 	run->user_data = 0;
416 
417 	return 0;
418 }
419 
/*
 * Like clobbered_vdso, but install test_handler() as the vDSO user exit
 * handler and seed user_data with a sentinel; the handler is expected to
 * run on exit and clear user_data back to zero.
 */
TEST_F(enclave, clobbered_vdso_and_user_function)
{
	struct encl_op_get_from_buf get_op;
	struct encl_op_put_to_buf put_op;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	self->run.user_handler = (__u64)test_handler;
	self->run.user_data = 0xdeadbeef;	/* sentinel cleared by handler */

	put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_op, &self->run, true), 0);

	EXPECT_EQ(get_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);
}
450 
/*
 * Sanity check that it is possible to enter either of the two hardcoded TCS
 * (the second TCS page lies one page above the enclave base).
 */
TEST_F(enclave, tcs_entry)
{
	struct encl_op_header op;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	op.type = ENCL_OP_NOP;

	EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Move to the next TCS. */
	self->run.tcs = self->encl.encl_base + PAGE_SIZE;

	EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
}
482 
/*
 * Second page of .data segment is used to test changing PTE permissions.
 * This spans the local encl_buffer within the test enclave.
 *
 * 1) Start with a sanity check: a value is written to the target page within
 *    the enclave and read back to ensure target page can be written to.
 * 2) Change PTE permissions (RW -> RO) of target page within enclave.
 * 3) Repeat (1) - this time expecting a regular #PF communicated via the
 *    vDSO.
 * 4) Change PTE permissions of target page within enclave back to be RW.
 * 5) Repeat (1) by resuming enclave, now expected to be possible to write to
 *    and read from target page within enclave.
 */
TEST_F(enclave, pte_permissions)
{
	struct encl_op_get_from_addr get_addr_op;
	struct encl_op_put_to_addr put_addr_op;
	unsigned long data_start;
	int ret;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/* Target page: second page of the .data segment (encl_buffer). */
	data_start = self->encl.encl_base +
		     encl_get_data_offset(&self->encl) +
		     PAGE_SIZE;

	/*
	 * Sanity check to ensure it is possible to write to page that will
	 * have its permissions manipulated.
	 */

	/* Write MAGIC to page */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = data_start;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/*
	 * Read memory that was just written to, confirming that it is the
	 * value previously written (MAGIC).
	 */
	get_addr_op.value = 0;
	get_addr_op.addr = data_start;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Change PTE permissions of target page within the enclave */
	ret = mprotect((void *)data_start, PAGE_SIZE, PROT_READ);
	if (ret)
		perror("mprotect");

	/*
	 * PTE permissions of target page changed to read-only, EPCM
	 * permissions unchanged (EPCM permissions are RW), attempt to
	 * write to the page, expecting a regular #PF.
	 */

	put_addr_op.value = MAGIC2;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	/* #PF error code 0x7: page present, write access, user mode. */
	EXPECT_EQ(self->run.exception_vector, 14);
	EXPECT_EQ(self->run.exception_error_code, 0x7);
	EXPECT_EQ(self->run.exception_addr, data_start);

	self->run.exception_vector = 0;
	self->run.exception_error_code = 0;
	self->run.exception_addr = 0;

	/*
	 * Change PTE permissions back to enable enclave to write to the
	 * target page and resume enclave - do not expect any exceptions this
	 * time.
	 */
	ret = mprotect((void *)data_start, PAGE_SIZE, PROT_READ | PROT_WRITE);
	if (ret)
		perror("mprotect");

	EXPECT_EQ(vdso_sgx_enter_enclave((unsigned long)&put_addr_op, 0,
					 0, ERESUME, 0, 0, &self->run),
		 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Confirm the resumed write landed. */
	get_addr_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC2);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
}
596 
/*
 * Modifying permissions of TCS page should not be possible.
 */
TEST_F(enclave, tcs_permissions)
{
	struct sgx_enclave_restrict_permissions ioc;
	int ret, errno_save;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	memset(&ioc, 0, sizeof(ioc));

	/*
	 * Ensure kernel supports needed ioctl() and system supports needed
	 * commands.
	 */

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS, &ioc);
	errno_save = ret == -1 ? errno : 0;	/* save before errno is overwritten */

	/*
	 * Invalid parameters were provided during sanity check,
	 * expect command to fail.
	 */
	ASSERT_EQ(ret, -1);

	/* ret == -1 */
	if (errno_save == ENOTTY)
		SKIP(return,
		     "Kernel does not support SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS ioctl()");
	else if (errno_save == ENODEV)
		SKIP(return, "System does not support SGX2");

	/*
	 * Attempt to make TCS page read-only. This is not allowed and
	 * should be prevented by the kernel.
	 */
	ioc.offset = encl_get_tcs_offset(&self->encl);
	ioc.length = PAGE_SIZE;
	ioc.permissions = SGX_SECINFO_R;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS, &ioc);
	errno_save = ret == -1 ? errno : 0;

	/* Kernel must reject the request before touching any page. */
	EXPECT_EQ(ret, -1);
	EXPECT_EQ(errno_save, EINVAL);
	EXPECT_EQ(ioc.result, 0);
	EXPECT_EQ(ioc.count, 0);
}
649 
650 /*
651  * Enclave page permission test.
652  *
653  * Modify and restore enclave page's EPCM (enclave) permissions from
654  * outside enclave (ENCLS[EMODPR] via kernel) as well as from within
655  * enclave (via ENCLU[EMODPE]). Check for page fault if
656  * VMA allows access but EPCM permissions do not.
657  */
658 TEST_F(enclave, epcm_permissions)
659 {
660 	struct sgx_enclave_restrict_permissions restrict_ioc;
661 	struct encl_op_get_from_addr get_addr_op;
662 	struct encl_op_put_to_addr put_addr_op;
663 	struct encl_op_eaccept eaccept_op;
664 	struct encl_op_emodpe emodpe_op;
665 	unsigned long data_start;
666 	int ret, errno_save;
667 
668 	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
669 
670 	memset(&self->run, 0, sizeof(self->run));
671 	self->run.tcs = self->encl.encl_base;
672 
673 	/*
674 	 * Ensure kernel supports needed ioctl() and system supports needed
675 	 * commands.
676 	 */
677 	memset(&restrict_ioc, 0, sizeof(restrict_ioc));
678 
679 	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS,
680 		    &restrict_ioc);
681 	errno_save = ret == -1 ? errno : 0;
682 
683 	/*
684 	 * Invalid parameters were provided during sanity check,
685 	 * expect command to fail.
686 	 */
687 	ASSERT_EQ(ret, -1);
688 
689 	/* ret == -1 */
690 	if (errno_save == ENOTTY)
691 		SKIP(return,
692 		     "Kernel does not support SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS ioctl()");
693 	else if (errno_save == ENODEV)
694 		SKIP(return, "System does not support SGX2");
695 
696 	/*
697 	 * Page that will have its permissions changed is the second data
698 	 * page in the .data segment. This forms part of the local encl_buffer
699 	 * within the enclave.
700 	 *
701 	 * At start of test @data_start should have EPCM as well as PTE and
702 	 * VMA permissions of RW.
703 	 */
704 
705 	data_start = self->encl.encl_base +
706 		     encl_get_data_offset(&self->encl) + PAGE_SIZE;
707 
708 	/*
709 	 * Sanity check that page at @data_start is writable before making
710 	 * any changes to page permissions.
711 	 *
712 	 * Start by writing MAGIC to test page.
713 	 */
714 	put_addr_op.value = MAGIC;
715 	put_addr_op.addr = data_start;
716 	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;
717 
718 	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
719 
720 	EXPECT_EEXIT(&self->run);
721 	EXPECT_EQ(self->run.exception_vector, 0);
722 	EXPECT_EQ(self->run.exception_error_code, 0);
723 	EXPECT_EQ(self->run.exception_addr, 0);
724 
725 	/*
726 	 * Read memory that was just written to, confirming that
727 	 * page is writable.
728 	 */
729 	get_addr_op.value = 0;
730 	get_addr_op.addr = data_start;
731 	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;
732 
733 	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
734 
735 	EXPECT_EQ(get_addr_op.value, MAGIC);
736 	EXPECT_EEXIT(&self->run);
737 	EXPECT_EQ(self->run.exception_vector, 0);
738 	EXPECT_EQ(self->run.exception_error_code, 0);
739 	EXPECT_EQ(self->run.exception_addr, 0);
740 
741 	/*
742 	 * Change EPCM permissions to read-only. Kernel still considers
743 	 * the page writable.
744 	 */
745 	memset(&restrict_ioc, 0, sizeof(restrict_ioc));
746 
747 	restrict_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
748 	restrict_ioc.length = PAGE_SIZE;
749 	restrict_ioc.permissions = SGX_SECINFO_R;
750 
751 	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS,
752 		    &restrict_ioc);
753 	errno_save = ret == -1 ? errno : 0;
754 
755 	EXPECT_EQ(ret, 0);
756 	EXPECT_EQ(errno_save, 0);
757 	EXPECT_EQ(restrict_ioc.result, 0);
758 	EXPECT_EQ(restrict_ioc.count, 4096);
759 
760 	/*
761 	 * EPCM permissions changed from kernel, need to EACCEPT from enclave.
762 	 */
763 	eaccept_op.epc_addr = data_start;
764 	eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_REG | SGX_SECINFO_PR;
765 	eaccept_op.ret = 0;
766 	eaccept_op.header.type = ENCL_OP_EACCEPT;
767 
768 	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
769 
770 	EXPECT_EEXIT(&self->run);
771 	EXPECT_EQ(self->run.exception_vector, 0);
772 	EXPECT_EQ(self->run.exception_error_code, 0);
773 	EXPECT_EQ(self->run.exception_addr, 0);
774 	EXPECT_EQ(eaccept_op.ret, 0);
775 
776 	/*
777 	 * EPCM permissions of page is now read-only, expect #PF
778 	 * on EPCM when attempting to write to page from within enclave.
779 	 */
780 	put_addr_op.value = MAGIC2;
781 
782 	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
783 
784 	EXPECT_EQ(self->run.function, ERESUME);
785 	EXPECT_EQ(self->run.exception_vector, 14);
786 	EXPECT_EQ(self->run.exception_error_code, 0x8007);
787 	EXPECT_EQ(self->run.exception_addr, data_start);
788 
789 	self->run.exception_vector = 0;
790 	self->run.exception_error_code = 0;
791 	self->run.exception_addr = 0;
792 
793 	/*
794 	 * Received AEX but cannot return to enclave at same entrypoint,
795 	 * need different TCS from where EPCM permission can be made writable
796 	 * again.
797 	 */
798 	self->run.tcs = self->encl.encl_base + PAGE_SIZE;
799 
800 	/*
801 	 * Enter enclave at new TCS to change EPCM permissions to be
802 	 * writable again and thus fix the page fault that triggered the
803 	 * AEX.
804 	 */
805 
806 	emodpe_op.epc_addr = data_start;
807 	emodpe_op.flags = SGX_SECINFO_R | SGX_SECINFO_W;
808 	emodpe_op.header.type = ENCL_OP_EMODPE;
809 
810 	EXPECT_EQ(ENCL_CALL(&emodpe_op, &self->run, true), 0);
811 
812 	EXPECT_EEXIT(&self->run);
813 	EXPECT_EQ(self->run.exception_vector, 0);
814 	EXPECT_EQ(self->run.exception_error_code, 0);
815 	EXPECT_EQ(self->run.exception_addr, 0);
816 
817 	/*
818 	 * Attempt to return to main TCS to resume execution at faulting
819 	 * instruction, PTE should continue to allow writing to the page.
820 	 */
821 	self->run.tcs = self->encl.encl_base;
822 
823 	/*
824 	 * Wrong page permissions that caused original fault has
825 	 * now been fixed via EPCM permissions.
826 	 * Resume execution in main TCS to re-attempt the memory access.
827 	 */
828 	self->run.tcs = self->encl.encl_base;
829 
830 	EXPECT_EQ(vdso_sgx_enter_enclave((unsigned long)&put_addr_op, 0, 0,
831 					 ERESUME, 0, 0,
832 					 &self->run),
833 		  0);
834 
835 	EXPECT_EEXIT(&self->run);
836 	EXPECT_EQ(self->run.exception_vector, 0);
837 	EXPECT_EQ(self->run.exception_error_code, 0);
838 	EXPECT_EQ(self->run.exception_addr, 0);
839 
840 	get_addr_op.value = 0;
841 
842 	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
843 
844 	EXPECT_EQ(get_addr_op.value, MAGIC2);
845 	EXPECT_EEXIT(&self->run);
846 	EXPECT_EQ(self->run.user_data, 0);
847 	EXPECT_EQ(self->run.exception_vector, 0);
848 	EXPECT_EQ(self->run.exception_error_code, 0);
849 	EXPECT_EQ(self->run.exception_addr, 0);
850 }
851 
/*
 * Test the addition of pages to an initialized enclave via writing to
 * a page belonging to the enclave's address space but was not added
 * during enclave creation.
 */
TEST_F(enclave, augment)
{
	struct encl_op_get_from_addr get_addr_op;
	struct encl_op_put_to_addr put_addr_op;
	struct encl_op_eaccept eaccept_op;
	size_t total_size = 0;
	void *addr;
	int i;

	if (!sgx2_supported())
		SKIP(return, "SGX2 not supported");

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/* Sum of all loaded segments == end of existing enclave pages. */
	for (i = 0; i < self->encl.nr_segments; i++) {
		struct encl_segment *seg = &self->encl.segment_tbl[i];

		total_size += seg->size;
	}

	/*
	 * Actual enclave size is expected to be larger than the loaded
	 * test enclave since enclave size must be a power of 2 in bytes
	 * and test_encl does not consume it all.
	 */
	EXPECT_LT(total_size + PAGE_SIZE, self->encl.encl_size);

	/*
	 * Create memory mapping for the page that will be added. New
	 * memory mapping is for one page right after all existing
	 * mappings.
	 * Kernel will allow new mapping using any permissions if it
	 * falls into the enclave's address range but not backed
	 * by existing enclave pages.
	 */
	addr = mmap((void *)self->encl.encl_base + total_size, PAGE_SIZE,
		    PROT_READ | PROT_WRITE | PROT_EXEC,
		    MAP_SHARED | MAP_FIXED, self->encl.fd, 0);
	EXPECT_NE(addr, MAP_FAILED);

	self->run.exception_vector = 0;
	self->run.exception_error_code = 0;
	self->run.exception_addr = 0;

	/*
	 * Attempt to write to the new page from within enclave.
	 * Expected to fail since page is not (yet) part of the enclave.
	 * The first #PF will trigger the addition of the page to the
	 * enclave, but since the new page needs an EACCEPT from within the
	 * enclave before it can be used it would not be possible
	 * to successfully return to the failing instruction. This is the
	 * cause of the second #PF captured here having the SGX bit set,
	 * it is from hardware preventing the page from being used.
	 */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = (unsigned long)addr;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EQ(self->run.function, ERESUME);
	EXPECT_EQ(self->run.exception_vector, 14);
	EXPECT_EQ(self->run.exception_addr, (unsigned long)addr);

	/* Error code 0x6 (no SGX bit): page was never added — no EAUG support. */
	if (self->run.exception_error_code == 0x6) {
		munmap(addr, PAGE_SIZE);
		SKIP(return, "Kernel does not support adding pages to initialized enclave");
	}

	EXPECT_EQ(self->run.exception_error_code, 0x8007);

	self->run.exception_vector = 0;
	self->run.exception_error_code = 0;
	self->run.exception_addr = 0;

	/* Handle AEX by running EACCEPT from new entry point. */
	self->run.tcs = self->encl.encl_base + PAGE_SIZE;

	eaccept_op.epc_addr = self->encl.encl_base + total_size;
	eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
	eaccept_op.ret = 0;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/* Can now return to main TCS to resume execution. */
	self->run.tcs = self->encl.encl_base;

	EXPECT_EQ(vdso_sgx_enter_enclave((unsigned long)&put_addr_op, 0, 0,
					 ERESUME, 0, 0,
					 &self->run),
		  0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/*
	 * Read memory from newly added page that was just written to,
	 * confirming that data previously written (MAGIC) is present.
	 */
	get_addr_op.value = 0;
	get_addr_op.addr = (unsigned long)addr;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	munmap(addr, PAGE_SIZE);
}
982 
/*
 * Test for the addition of pages to an initialized enclave via a
 * pre-emptive run of EACCEPT on page to be added.
 */
TEST_F(enclave, augment_via_eaccept)
{
	struct encl_op_get_from_addr get_addr_op;
	struct encl_op_put_to_addr put_addr_op;
	struct encl_op_eaccept eaccept_op;
	size_t total_size = 0;
	void *addr;
	int i;

	if (!sgx2_supported())
		SKIP(return, "SGX2 not supported");

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/* Sum of all loaded segments == end of existing enclave pages. */
	for (i = 0; i < self->encl.nr_segments; i++) {
		struct encl_segment *seg = &self->encl.segment_tbl[i];

		total_size += seg->size;
	}

	/*
	 * Actual enclave size is expected to be larger than the loaded
	 * test enclave since enclave size must be a power of 2 in bytes while
	 * test_encl does not consume it all.
	 */
	EXPECT_LT(total_size + PAGE_SIZE, self->encl.encl_size);

	/*
	 * mmap() a page at end of existing enclave to be used for dynamic
	 * EPC page.
	 *
	 * Kernel will allow new mapping using any permissions if it
	 * falls into the enclave's address range but not backed
	 * by existing enclave pages.
	 */

	addr = mmap((void *)self->encl.encl_base + total_size, PAGE_SIZE,
		    PROT_READ | PROT_WRITE | PROT_EXEC, MAP_SHARED | MAP_FIXED,
		    self->encl.fd, 0);
	EXPECT_NE(addr, MAP_FAILED);

	self->run.exception_vector = 0;
	self->run.exception_error_code = 0;
	self->run.exception_addr = 0;

	/*
	 * Run EACCEPT on new page to trigger the #PF->EAUG->EACCEPT(again
	 * without a #PF). All should be transparent to userspace.
	 */
	eaccept_op.epc_addr = self->encl.encl_base + total_size;
	eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
	eaccept_op.ret = 0;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	/* An unresolved #PF here means the kernel never EAUG'd the page. */
	if (self->run.exception_vector == 14 &&
	    self->run.exception_error_code == 4 &&
	    self->run.exception_addr == self->encl.encl_base + total_size) {
		munmap(addr, PAGE_SIZE);
		SKIP(return, "Kernel does not support adding pages to initialized enclave");
	}

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/*
	 * New page should be accessible from within enclave - attempt to
	 * write to it.
	 */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = (unsigned long)addr;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/*
	 * Read memory from newly added page that was just written to,
	 * confirming that data previously written (MAGIC) is present.
	 */
	get_addr_op.value = 0;
	get_addr_op.addr = (unsigned long)addr;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	munmap(addr, PAGE_SIZE);
}
1092 
1093 /*
1094  * SGX2 page type modification test in two phases:
1095  * Phase 1:
 * Create a new TCS, consisting of three new pages (stack page with regular
1097  * page type, SSA page with regular page type, and TCS page with TCS page
1098  * type) in an initialized enclave and run a simple workload within it.
1099  * Phase 2:
1100  * Remove the three pages added in phase 1, add a new regular page at the
1101  * same address that previously hosted the TCS page and verify that it can
1102  * be modified.
1103  */
1104 TEST_F(enclave, tcs_create)
1105 {
1106 	struct encl_op_init_tcs_page init_tcs_page_op;
1107 	struct sgx_enclave_remove_pages remove_ioc;
1108 	struct encl_op_get_from_addr get_addr_op;
1109 	struct sgx_enclave_modify_types modt_ioc;
1110 	struct encl_op_put_to_addr put_addr_op;
1111 	struct encl_op_get_from_buf get_buf_op;
1112 	struct encl_op_put_to_buf put_buf_op;
1113 	void *addr, *tcs, *stack_end, *ssa;
1114 	struct encl_op_eaccept eaccept_op;
1115 	size_t total_size = 0;
1116 	uint64_t val_64;
1117 	int errno_save;
1118 	int ret, i;
1119 
1120 	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl,
1121 				    _metadata));
1122 
1123 	memset(&self->run, 0, sizeof(self->run));
1124 	self->run.tcs = self->encl.encl_base;
1125 
1126 	/*
1127 	 * Hardware (SGX2) and kernel support is needed for this test. Start
1128 	 * with check that test has a chance of succeeding.
1129 	 */
1130 	memset(&modt_ioc, 0, sizeof(modt_ioc));
1131 	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
1132 
1133 	if (ret == -1) {
1134 		if (errno == ENOTTY)
1135 			SKIP(return,
1136 			     "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
1137 		else if (errno == ENODEV)
1138 			SKIP(return, "System does not support SGX2");
1139 	}
1140 
1141 	/*
1142 	 * Invalid parameters were provided during sanity check,
1143 	 * expect command to fail.
1144 	 */
1145 	EXPECT_EQ(ret, -1);
1146 
1147 	/*
1148 	 * Add three regular pages via EAUG: one will be the TCS stack, one
1149 	 * will be the TCS SSA, and one will be the new TCS. The stack and
1150 	 * SSA will remain as regular pages, the TCS page will need its
1151 	 * type changed after populated with needed data.
1152 	 */
1153 	for (i = 0; i < self->encl.nr_segments; i++) {
1154 		struct encl_segment *seg = &self->encl.segment_tbl[i];
1155 
1156 		total_size += seg->size;
1157 	}
1158 
1159 	/*
1160 	 * Actual enclave size is expected to be larger than the loaded
1161 	 * test enclave since enclave size must be a power of 2 in bytes while
1162 	 * test_encl does not consume it all.
1163 	 */
1164 	EXPECT_LT(total_size + 3 * PAGE_SIZE, self->encl.encl_size);
1165 
1166 	/*
1167 	 * mmap() three pages at end of existing enclave to be used for the
1168 	 * three new pages.
1169 	 */
1170 	addr = mmap((void *)self->encl.encl_base + total_size, 3 * PAGE_SIZE,
1171 		    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED,
1172 		    self->encl.fd, 0);
1173 	EXPECT_NE(addr, MAP_FAILED);
1174 
1175 	self->run.exception_vector = 0;
1176 	self->run.exception_error_code = 0;
1177 	self->run.exception_addr = 0;
1178 
1179 	stack_end = (void *)self->encl.encl_base + total_size;
1180 	tcs = (void *)self->encl.encl_base + total_size + PAGE_SIZE;
1181 	ssa = (void *)self->encl.encl_base + total_size + 2 * PAGE_SIZE;
1182 
1183 	/*
1184 	 * Run EACCEPT on each new page to trigger the
1185 	 * EACCEPT->(#PF)->EAUG->EACCEPT(again without a #PF) flow.
1186 	 */
1187 
1188 	eaccept_op.epc_addr = (unsigned long)stack_end;
1189 	eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
1190 	eaccept_op.ret = 0;
1191 	eaccept_op.header.type = ENCL_OP_EACCEPT;
1192 
1193 	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
1194 
1195 	if (self->run.exception_vector == 14 &&
1196 	    self->run.exception_error_code == 4 &&
1197 	    self->run.exception_addr == (unsigned long)stack_end) {
1198 		munmap(addr, 3 * PAGE_SIZE);
1199 		SKIP(return, "Kernel does not support adding pages to initialized enclave");
1200 	}
1201 
1202 	EXPECT_EEXIT(&self->run);
1203 	EXPECT_EQ(self->run.exception_vector, 0);
1204 	EXPECT_EQ(self->run.exception_error_code, 0);
1205 	EXPECT_EQ(self->run.exception_addr, 0);
1206 	EXPECT_EQ(eaccept_op.ret, 0);
1207 
1208 	eaccept_op.epc_addr = (unsigned long)ssa;
1209 
1210 	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
1211 
1212 	EXPECT_EEXIT(&self->run);
1213 	EXPECT_EQ(self->run.exception_vector, 0);
1214 	EXPECT_EQ(self->run.exception_error_code, 0);
1215 	EXPECT_EQ(self->run.exception_addr, 0);
1216 	EXPECT_EQ(eaccept_op.ret, 0);
1217 
1218 	eaccept_op.epc_addr = (unsigned long)tcs;
1219 
1220 	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
1221 
1222 	EXPECT_EEXIT(&self->run);
1223 	EXPECT_EQ(self->run.exception_vector, 0);
1224 	EXPECT_EQ(self->run.exception_error_code, 0);
1225 	EXPECT_EQ(self->run.exception_addr, 0);
1226 	EXPECT_EQ(eaccept_op.ret, 0);
1227 
1228 	/*
1229 	 * Three new pages added to enclave. Now populate the TCS page with
1230 	 * needed data. This should be done from within enclave. Provide
1231 	 * the function that will do the actual data population with needed
1232 	 * data.
1233 	 */
1234 
1235 	/*
1236 	 * New TCS will use the "encl_dyn_entry" entrypoint that expects
1237 	 * stack to begin in page before TCS page.
1238 	 */
1239 	val_64 = encl_get_entry(&self->encl, "encl_dyn_entry");
1240 	EXPECT_NE(val_64, 0);
1241 
1242 	init_tcs_page_op.tcs_page = (unsigned long)tcs;
1243 	init_tcs_page_op.ssa = (unsigned long)total_size + 2 * PAGE_SIZE;
1244 	init_tcs_page_op.entry = val_64;
1245 	init_tcs_page_op.header.type = ENCL_OP_INIT_TCS_PAGE;
1246 
1247 	EXPECT_EQ(ENCL_CALL(&init_tcs_page_op, &self->run, true), 0);
1248 
1249 	EXPECT_EEXIT(&self->run);
1250 	EXPECT_EQ(self->run.exception_vector, 0);
1251 	EXPECT_EQ(self->run.exception_error_code, 0);
1252 	EXPECT_EQ(self->run.exception_addr, 0);
1253 
1254 	/* Change TCS page type to TCS. */
1255 	memset(&modt_ioc, 0, sizeof(modt_ioc));
1256 
1257 	modt_ioc.offset = total_size + PAGE_SIZE;
1258 	modt_ioc.length = PAGE_SIZE;
1259 	modt_ioc.page_type = SGX_PAGE_TYPE_TCS;
1260 
1261 	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
1262 	errno_save = ret == -1 ? errno : 0;
1263 
1264 	EXPECT_EQ(ret, 0);
1265 	EXPECT_EQ(errno_save, 0);
1266 	EXPECT_EQ(modt_ioc.result, 0);
1267 	EXPECT_EQ(modt_ioc.count, 4096);
1268 
1269 	/* EACCEPT new TCS page from enclave. */
1270 	eaccept_op.epc_addr = (unsigned long)tcs;
1271 	eaccept_op.flags = SGX_SECINFO_TCS | SGX_SECINFO_MODIFIED;
1272 	eaccept_op.ret = 0;
1273 	eaccept_op.header.type = ENCL_OP_EACCEPT;
1274 
1275 	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
1276 
1277 	EXPECT_EEXIT(&self->run);
1278 	EXPECT_EQ(self->run.exception_vector, 0);
1279 	EXPECT_EQ(self->run.exception_error_code, 0);
1280 	EXPECT_EQ(self->run.exception_addr, 0);
1281 	EXPECT_EQ(eaccept_op.ret, 0);
1282 
1283 	/* Run workload from new TCS. */
1284 	self->run.tcs = (unsigned long)tcs;
1285 
1286 	/*
1287 	 * Simple workload to write to data buffer and read value back.
1288 	 */
1289 	put_buf_op.header.type = ENCL_OP_PUT_TO_BUFFER;
1290 	put_buf_op.value = MAGIC;
1291 
1292 	EXPECT_EQ(ENCL_CALL(&put_buf_op, &self->run, true), 0);
1293 
1294 	EXPECT_EEXIT(&self->run);
1295 	EXPECT_EQ(self->run.exception_vector, 0);
1296 	EXPECT_EQ(self->run.exception_error_code, 0);
1297 	EXPECT_EQ(self->run.exception_addr, 0);
1298 
1299 	get_buf_op.header.type = ENCL_OP_GET_FROM_BUFFER;
1300 	get_buf_op.value = 0;
1301 
1302 	EXPECT_EQ(ENCL_CALL(&get_buf_op, &self->run, true), 0);
1303 
1304 	EXPECT_EQ(get_buf_op.value, MAGIC);
1305 	EXPECT_EEXIT(&self->run);
1306 	EXPECT_EQ(self->run.exception_vector, 0);
1307 	EXPECT_EQ(self->run.exception_error_code, 0);
1308 	EXPECT_EQ(self->run.exception_addr, 0);
1309 
1310 	/*
1311 	 * Phase 2 of test:
1312 	 * Remove pages associated with new TCS, create a regular page
1313 	 * where TCS page used to be and verify it can be used as a regular
1314 	 * page.
1315 	 */
1316 
1317 	/* Start page removal by requesting change of page type to PT_TRIM. */
1318 	memset(&modt_ioc, 0, sizeof(modt_ioc));
1319 
1320 	modt_ioc.offset = total_size;
1321 	modt_ioc.length = 3 * PAGE_SIZE;
1322 	modt_ioc.page_type = SGX_PAGE_TYPE_TRIM;
1323 
1324 	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
1325 	errno_save = ret == -1 ? errno : 0;
1326 
1327 	EXPECT_EQ(ret, 0);
1328 	EXPECT_EQ(errno_save, 0);
1329 	EXPECT_EQ(modt_ioc.result, 0);
1330 	EXPECT_EQ(modt_ioc.count, 3 * PAGE_SIZE);
1331 
1332 	/*
1333 	 * Enter enclave via TCS #1 and approve page removal by sending
1334 	 * EACCEPT for each of three removed pages.
1335 	 */
1336 	self->run.tcs = self->encl.encl_base;
1337 
1338 	eaccept_op.epc_addr = (unsigned long)stack_end;
1339 	eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED;
1340 	eaccept_op.ret = 0;
1341 	eaccept_op.header.type = ENCL_OP_EACCEPT;
1342 
1343 	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
1344 
1345 	EXPECT_EEXIT(&self->run);
1346 	EXPECT_EQ(self->run.exception_vector, 0);
1347 	EXPECT_EQ(self->run.exception_error_code, 0);
1348 	EXPECT_EQ(self->run.exception_addr, 0);
1349 	EXPECT_EQ(eaccept_op.ret, 0);
1350 
1351 	eaccept_op.epc_addr = (unsigned long)tcs;
1352 	eaccept_op.ret = 0;
1353 
1354 	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
1355 
1356 	EXPECT_EEXIT(&self->run);
1357 	EXPECT_EQ(self->run.exception_vector, 0);
1358 	EXPECT_EQ(self->run.exception_error_code, 0);
1359 	EXPECT_EQ(self->run.exception_addr, 0);
1360 	EXPECT_EQ(eaccept_op.ret, 0);
1361 
1362 	eaccept_op.epc_addr = (unsigned long)ssa;
1363 	eaccept_op.ret = 0;
1364 
1365 	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
1366 
1367 	EXPECT_EEXIT(&self->run);
1368 	EXPECT_EQ(self->run.exception_vector, 0);
1369 	EXPECT_EQ(self->run.exception_error_code, 0);
1370 	EXPECT_EQ(self->run.exception_addr, 0);
1371 	EXPECT_EQ(eaccept_op.ret, 0);
1372 
1373 	/* Send final ioctl() to complete page removal. */
1374 	memset(&remove_ioc, 0, sizeof(remove_ioc));
1375 
1376 	remove_ioc.offset = total_size;
1377 	remove_ioc.length = 3 * PAGE_SIZE;
1378 
1379 	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc);
1380 	errno_save = ret == -1 ? errno : 0;
1381 
1382 	EXPECT_EQ(ret, 0);
1383 	EXPECT_EQ(errno_save, 0);
1384 	EXPECT_EQ(remove_ioc.count, 3 * PAGE_SIZE);
1385 
1386 	/*
1387 	 * Enter enclave via TCS #1 and access location where TCS #3 was to
1388 	 * trigger dynamic add of regular page at that location.
1389 	 */
1390 	eaccept_op.epc_addr = (unsigned long)tcs;
1391 	eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
1392 	eaccept_op.ret = 0;
1393 	eaccept_op.header.type = ENCL_OP_EACCEPT;
1394 
1395 	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
1396 
1397 	EXPECT_EEXIT(&self->run);
1398 	EXPECT_EQ(self->run.exception_vector, 0);
1399 	EXPECT_EQ(self->run.exception_error_code, 0);
1400 	EXPECT_EQ(self->run.exception_addr, 0);
1401 	EXPECT_EQ(eaccept_op.ret, 0);
1402 
1403 	/*
1404 	 * New page should be accessible from within enclave - write to it.
1405 	 */
1406 	put_addr_op.value = MAGIC;
1407 	put_addr_op.addr = (unsigned long)tcs;
1408 	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;
1409 
1410 	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
1411 
1412 	EXPECT_EEXIT(&self->run);
1413 	EXPECT_EQ(self->run.exception_vector, 0);
1414 	EXPECT_EQ(self->run.exception_error_code, 0);
1415 	EXPECT_EQ(self->run.exception_addr, 0);
1416 
1417 	/*
1418 	 * Read memory from newly added page that was just written to,
1419 	 * confirming that data previously written (MAGIC) is present.
1420 	 */
1421 	get_addr_op.value = 0;
1422 	get_addr_op.addr = (unsigned long)tcs;
1423 	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;
1424 
1425 	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
1426 
1427 	EXPECT_EQ(get_addr_op.value, MAGIC);
1428 	EXPECT_EEXIT(&self->run);
1429 	EXPECT_EQ(self->run.exception_vector, 0);
1430 	EXPECT_EQ(self->run.exception_error_code, 0);
1431 	EXPECT_EQ(self->run.exception_addr, 0);
1432 
1433 	munmap(addr, 3 * PAGE_SIZE);
1434 }
1435 
1436 TEST_HARNESS_MAIN
1437