// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2020-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 */

/**
 * DOC: Enclave lifetime management driver for Nitro Enclaves (NE).
 * Nitro is a hypervisor that has been developed by Amazon.
 */

#include <linux/anon_inodes.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/hugetlb.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nitro_enclaves.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/range.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <uapi/linux/vm_sockets.h>

#include "ne_misc_dev.h"
#include "ne_pci_dev.h"

/**
 * NE_CPUS_SIZE - Size for max 128 CPUs, for now, in a cpu-list string, comma
 *                separated. The NE CPU pool includes CPUs from a single NUMA
 *                node.
 */
#define NE_CPUS_SIZE		(512)

/**
 * NE_EIF_LOAD_OFFSET - The offset where to copy the Enclave Image Format (EIF)
 *                      image in enclave memory.
 */
#define NE_EIF_LOAD_OFFSET	(8 * 1024UL * 1024UL)

/**
 * NE_MIN_ENCLAVE_MEM_SIZE - The minimum memory size an enclave can be launched
 *                           with.
 */
#define NE_MIN_ENCLAVE_MEM_SIZE	(64 * 1024UL * 1024UL)

/**
 * NE_MIN_MEM_REGION_SIZE - The minimum size of an enclave memory region.
 */
#define NE_MIN_MEM_REGION_SIZE	(2 * 1024UL * 1024UL)

/**
 * NE_PARENT_VM_CID - The CID for the vsock device of the primary / parent VM.
 */
#define NE_PARENT_VM_CID	(3)
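
/*
 * Illustrative only, not part of this driver: a minimal user space sketch of
 * how an application inside an enclave could reach the primary / parent VM
 * over vsock using this CID. The port number is a made-up example value; the
 * AF_VSOCK socket API itself is standard.
 *
 *	#include <sys/socket.h>
 *	#include <linux/vm_sockets.h>
 *
 *	int fd = socket(AF_VSOCK, SOCK_STREAM, 0);
 *	struct sockaddr_vm addr = {
 *		.svm_family	= AF_VSOCK,
 *		.svm_cid	= 3,	// NE_PARENT_VM_CID
 *		.svm_port	= 9000,	// hypothetical application port
 *	};
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 */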

static long ne_ioctl(struct file *file, unsigned int cmd, unsigned long arg);

static const struct file_operations ne_fops = {
	.owner		= THIS_MODULE,
	.llseek		= noop_llseek,
	.unlocked_ioctl	= ne_ioctl,
};

static struct miscdevice ne_misc_dev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "nitro_enclaves",
	.fops	= &ne_fops,
	.mode	= 0660,
};

struct ne_devs ne_devs = {
	.ne_misc_dev	= &ne_misc_dev,
};

/*
 * TODO: Update logic to create new sysfs entries instead of using
 * a kernel parameter e.g. if multiple sysfs files needed.
 */
static int ne_set_kernel_param(const char *val, const struct kernel_param *kp);

static const struct kernel_param_ops ne_cpu_pool_ops = {
	.get	= param_get_string,
	.set	= ne_set_kernel_param,
};

static char ne_cpus[NE_CPUS_SIZE];
static struct kparam_string ne_cpus_arg = {
	.maxlen	= sizeof(ne_cpus),
	.string	= ne_cpus,
};

module_param_cb(ne_cpus, &ne_cpu_pool_ops, &ne_cpus_arg, 0644);
/* https://www.kernel.org/doc/html/latest/admin-guide/kernel-parameters.html#cpu-lists */
MODULE_PARM_DESC(ne_cpus, "<cpu-list> - CPU pool used for Nitro Enclaves");
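
/*
 * Usage sketch from the admin side, assuming the module is loaded and the
 * caller has CAP_SYS_ADMIN (enforced in ne_set_kernel_param()): the pool can
 * be set at module load time or later via the sysfs file backing this
 * parameter, e.g.
 *
 *	insmod nitro_enclaves.ko ne_cpus=2-7
 * or
 *	echo 2-7 > /sys/module/nitro_enclaves/parameters/ne_cpus
 *
 * The cpu-list "2-7" is an example value; any list that passes the checks in
 * ne_setup_cpu_pool() (full cores only, single NUMA node, CPU 0 and its
 * siblings excluded) works.
 */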

/**
 * struct ne_cpu_pool - CPU pool used for Nitro Enclaves.
 * @avail_threads_per_core:	Available full CPU cores to be dedicated to
 *				enclave(s). The cpumasks from the array, indexed
 *				by core id, contain all the threads from the
 *				available cores, that are not set for created
 *				enclave(s). The full CPU cores are part of the
 *				NE CPU pool.
 * @mutex:			Mutex for the access to the NE CPU pool.
 * @nr_parent_vm_cores:		The size of the available threads per core array.
 *				The total number of CPU cores available on the
 *				primary / parent VM.
 * @nr_threads_per_core:	The number of threads that a full CPU core has.
 * @numa_node:			NUMA node of the CPUs in the pool.
 */
struct ne_cpu_pool {
	cpumask_var_t	*avail_threads_per_core;
	struct mutex	mutex;
	unsigned int	nr_parent_vm_cores;
	unsigned int	nr_threads_per_core;
	int		numa_node;
};

static struct ne_cpu_pool ne_cpu_pool;

/**
 * struct ne_phys_contig_mem_regions - Contiguous physical memory regions.
 * @num:	The number of regions currently in the array.
 * @regions:	The array of physical memory regions.
 */
struct ne_phys_contig_mem_regions {
	unsigned long	num;
	struct range	*regions;
};

/**
 * ne_check_enclaves_created() - Verify if at least one enclave has been created.
 * @void:	No parameters provided.
 *
 * Context: Process context.
 * Return:
 * * True if at least one enclave is created.
 * * False otherwise.
 */
static bool ne_check_enclaves_created(void)
{
	struct ne_pci_dev *ne_pci_dev = ne_devs.ne_pci_dev;
	bool ret = false;

	if (!ne_pci_dev)
		return ret;

	mutex_lock(&ne_pci_dev->enclaves_list_mutex);

	if (!list_empty(&ne_pci_dev->enclaves_list))
		ret = true;

	mutex_unlock(&ne_pci_dev->enclaves_list_mutex);

	return ret;
}

/**
 * ne_setup_cpu_pool() - Set the NE CPU pool after handling sanity checks such
 *			 as not sharing CPU cores with the primary / parent VM
 *			 or not using CPU 0, which should remain available for
 *			 the primary / parent VM. Offline the CPUs from the
 *			 pool after the checks passed.
 * @ne_cpu_list:	The CPU list used for setting NE CPU pool.
 *
 * Context: Process context.
 * Return:
 * * 0 on success.
 * * Negative return value on failure.
 */
static int ne_setup_cpu_pool(const char *ne_cpu_list)
{
	int core_id = -1;
	unsigned int cpu = 0;
	cpumask_var_t cpu_pool;
	unsigned int cpu_sibling = 0;
	unsigned int i = 0;
	int numa_node = -1;
	int rc = -EINVAL;

	if (!zalloc_cpumask_var(&cpu_pool, GFP_KERNEL))
		return -ENOMEM;

	mutex_lock(&ne_cpu_pool.mutex);

	rc = cpulist_parse(ne_cpu_list, cpu_pool);
	if (rc < 0) {
		pr_err("%s: Error in cpulist parse [rc=%d]\n", ne_misc_dev.name, rc);

		goto free_pool_cpumask;
	}

	cpu = cpumask_any(cpu_pool);
	if (cpu >= nr_cpu_ids) {
		pr_err("%s: No CPUs available in CPU pool\n", ne_misc_dev.name);

		rc = -EINVAL;

		goto free_pool_cpumask;
	}

	/*
	 * Check if the CPUs are online, to further get info about them
	 * e.g. numa node, core id, siblings.
	 */
	for_each_cpu(cpu, cpu_pool)
		if (cpu_is_offline(cpu)) {
			pr_err("%s: CPU %d is offline, has to be online to get its metadata\n",
			       ne_misc_dev.name, cpu);

			rc = -EINVAL;

			goto free_pool_cpumask;
		}

	/*
	 * Check if the CPUs from the NE CPU pool are from the same NUMA node.
	 */
	for_each_cpu(cpu, cpu_pool)
		if (numa_node < 0) {
			numa_node = cpu_to_node(cpu);
			if (numa_node < 0) {
				pr_err("%s: Invalid NUMA node %d\n",
				       ne_misc_dev.name, numa_node);

				rc = -EINVAL;

				goto free_pool_cpumask;
			}
		} else {
			if (numa_node != cpu_to_node(cpu)) {
				pr_err("%s: CPUs with different NUMA nodes\n",
				       ne_misc_dev.name);

				rc = -EINVAL;

				goto free_pool_cpumask;
			}
		}

	/*
	 * Check if CPU 0 and its siblings are included in the provided CPU pool.
	 * They should remain available for the primary / parent VM.
	 */
	if (cpumask_test_cpu(0, cpu_pool)) {
		pr_err("%s: CPU 0 has to remain available\n", ne_misc_dev.name);

		rc = -EINVAL;

		goto free_pool_cpumask;
	}

	for_each_cpu(cpu_sibling, topology_sibling_cpumask(0)) {
		if (cpumask_test_cpu(cpu_sibling, cpu_pool)) {
			pr_err("%s: CPU sibling %d for CPU 0 is in CPU pool\n",
			       ne_misc_dev.name, cpu_sibling);

			rc = -EINVAL;

			goto free_pool_cpumask;
		}
	}

	/*
	 * Check if CPU siblings are included in the provided CPU pool. The
	 * expectation is that full CPU cores are made available in the CPU pool
	 * for enclaves.
	 */
	for_each_cpu(cpu, cpu_pool) {
		for_each_cpu(cpu_sibling, topology_sibling_cpumask(cpu)) {
			if (!cpumask_test_cpu(cpu_sibling, cpu_pool)) {
				pr_err("%s: CPU %d is not in CPU pool\n",
				       ne_misc_dev.name, cpu_sibling);

				rc = -EINVAL;

				goto free_pool_cpumask;
			}
		}
	}

	/* Calculate the number of threads from a full CPU core. */
	cpu = cpumask_any(cpu_pool);
	for_each_cpu(cpu_sibling, topology_sibling_cpumask(cpu))
		ne_cpu_pool.nr_threads_per_core++;

	ne_cpu_pool.nr_parent_vm_cores = nr_cpu_ids / ne_cpu_pool.nr_threads_per_core;

	ne_cpu_pool.avail_threads_per_core = kcalloc(ne_cpu_pool.nr_parent_vm_cores,
						     sizeof(*ne_cpu_pool.avail_threads_per_core),
						     GFP_KERNEL);
	if (!ne_cpu_pool.avail_threads_per_core) {
		rc = -ENOMEM;

		goto free_pool_cpumask;
	}

	for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++)
		if (!zalloc_cpumask_var(&ne_cpu_pool.avail_threads_per_core[i], GFP_KERNEL)) {
			rc = -ENOMEM;

			goto free_cores_cpumask;
		}

	/*
	 * Split the NE CPU pool in threads per core to keep the CPU topology
	 * after offlining the CPUs.
	 */
	for_each_cpu(cpu, cpu_pool) {
		core_id = topology_core_id(cpu);
		if (core_id < 0 || core_id >= ne_cpu_pool.nr_parent_vm_cores) {
			pr_err("%s: Invalid core id %d for CPU %d\n",
			       ne_misc_dev.name, core_id, cpu);

			rc = -EINVAL;

			goto clear_cpumask;
		}

		cpumask_set_cpu(cpu, ne_cpu_pool.avail_threads_per_core[core_id]);
	}

	/*
	 * CPUs that are given to enclave(s) should not be considered online
	 * by Linux anymore, as the hypervisor will degrade them to floating.
	 * The physical CPUs (full cores) are carved out of the primary / parent
	 * VM and given to the enclave VM. The same number of vCPUs would run
	 * on fewer pCPUs for the primary / parent VM.
	 *
	 * We offline them here, to not degrade performance and expose correct
	 * topology to Linux and user space.
	 */
	for_each_cpu(cpu, cpu_pool) {
		rc = remove_cpu(cpu);
		if (rc != 0) {
			pr_err("%s: CPU %d is not offlined [rc=%d]\n",
			       ne_misc_dev.name, cpu, rc);

			goto online_cpus;
		}
	}

	free_cpumask_var(cpu_pool);

	ne_cpu_pool.numa_node = numa_node;

	mutex_unlock(&ne_cpu_pool.mutex);

	return 0;

online_cpus:
	for_each_cpu(cpu, cpu_pool)
		add_cpu(cpu);
clear_cpumask:
	for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++)
		cpumask_clear(ne_cpu_pool.avail_threads_per_core[i]);
free_cores_cpumask:
	for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++)
		free_cpumask_var(ne_cpu_pool.avail_threads_per_core[i]);
	kfree(ne_cpu_pool.avail_threads_per_core);
free_pool_cpumask:
	free_cpumask_var(cpu_pool);
	ne_cpu_pool.nr_parent_vm_cores = 0;
	ne_cpu_pool.nr_threads_per_core = 0;
	ne_cpu_pool.numa_node = -1;
	mutex_unlock(&ne_cpu_pool.mutex);

	return rc;
}
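
/*
 * Worked example, assuming an illustrative topology with 2 threads per core
 * where CPUs 1/5 are the siblings of core id 1 and CPUs 2/6 of core id 2:
 * ne_cpus=1-2,5-6 passes all the checks above, the pool is split as
 * avail_threads_per_core[1] = {1, 5} and avail_threads_per_core[2] = {2, 6},
 * and all four CPUs are then offlined in the parent VM.
 */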

/**
 * ne_teardown_cpu_pool() - Online the CPUs from the NE CPU pool and cleanup the
 *			    CPU pool.
 * @void:	No parameters provided.
 *
 * Context: Process context.
 */
static void ne_teardown_cpu_pool(void)
{
	unsigned int cpu = 0;
	unsigned int i = 0;
	int rc = -EINVAL;

	mutex_lock(&ne_cpu_pool.mutex);

	if (!ne_cpu_pool.nr_parent_vm_cores) {
		mutex_unlock(&ne_cpu_pool.mutex);

		return;
	}

	for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++) {
		for_each_cpu(cpu, ne_cpu_pool.avail_threads_per_core[i]) {
			rc = add_cpu(cpu);
			if (rc != 0)
				pr_err("%s: CPU %d is not onlined [rc=%d]\n",
				       ne_misc_dev.name, cpu, rc);
		}

		cpumask_clear(ne_cpu_pool.avail_threads_per_core[i]);

		free_cpumask_var(ne_cpu_pool.avail_threads_per_core[i]);
	}

	kfree(ne_cpu_pool.avail_threads_per_core);
	ne_cpu_pool.nr_parent_vm_cores = 0;
	ne_cpu_pool.nr_threads_per_core = 0;
	ne_cpu_pool.numa_node = -1;

	mutex_unlock(&ne_cpu_pool.mutex);
}

/**
 * ne_set_kernel_param() - Set the NE CPU pool value via the NE kernel parameter.
 * @val:	NE CPU pool string value.
 * @kp:		NE kernel parameter associated with the NE CPU pool.
 *
 * Context: Process context.
 * Return:
 * * 0 on success.
 * * Negative return value on failure.
 */
static int ne_set_kernel_param(const char *val, const struct kernel_param *kp)
{
	char error_val[] = "";
	int rc = -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (ne_check_enclaves_created()) {
		pr_err("%s: The CPU pool is used by enclave(s)\n", ne_misc_dev.name);

		return -EPERM;
	}

	ne_teardown_cpu_pool();

	rc = ne_setup_cpu_pool(val);
	if (rc < 0) {
		pr_err("%s: Error in setup CPU pool [rc=%d]\n", ne_misc_dev.name, rc);

		param_set_copystring(error_val, kp);

		return rc;
	}

	rc = param_set_copystring(val, kp);
	if (rc < 0) {
		pr_err("%s: Error in param set copystring [rc=%d]\n", ne_misc_dev.name, rc);

		ne_teardown_cpu_pool();

		param_set_copystring(error_val, kp);

		return rc;
	}

	return 0;
}

/**
 * ne_donated_cpu() - Check if the provided CPU is already used by the enclave.
 * @ne_enclave:	Private data associated with the current enclave.
 * @cpu:	CPU to check if already used.
 *
 * Context: Process context. This function is called with the ne_enclave mutex held.
 * Return:
 * * True if the provided CPU is already used by the enclave.
 * * False otherwise.
 */
static bool ne_donated_cpu(struct ne_enclave *ne_enclave, unsigned int cpu)
{
	if (cpumask_test_cpu(cpu, ne_enclave->vcpu_ids))
		return true;

	return false;
}

/**
 * ne_get_unused_core_from_cpu_pool() - Get the id of a full core from the
 *					NE CPU pool.
 * @void:	No parameters provided.
 *
 * Context: Process context. This function is called with the ne_enclave and
 *	    ne_cpu_pool mutexes held.
 * Return:
 * * Core id.
 * * -1 if no CPU core available in the pool.
 */
static int ne_get_unused_core_from_cpu_pool(void)
{
	int core_id = -1;
	unsigned int i = 0;

	for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++)
		if (!cpumask_empty(ne_cpu_pool.avail_threads_per_core[i])) {
			core_id = i;

			break;
		}

	return core_id;
}

/**
 * ne_set_enclave_threads_per_core() - Set the threads of the provided core in
 *				       the enclave data structure.
 * @ne_enclave:	Private data associated with the current enclave.
 * @core_id:	Core id to get its threads from the NE CPU pool.
 * @vcpu_id:	vCPU id part of the provided core.
 *
 * Context: Process context. This function is called with the ne_enclave and
 *	    ne_cpu_pool mutexes held.
 * Return:
 * * 0 on success.
 * * Negative return value on failure.
 */
static int ne_set_enclave_threads_per_core(struct ne_enclave *ne_enclave,
					   int core_id, u32 vcpu_id)
{
	unsigned int cpu = 0;

	if (core_id < 0 && vcpu_id == 0) {
		dev_err_ratelimited(ne_misc_dev.this_device,
				    "No CPUs available in NE CPU pool\n");

		return -NE_ERR_NO_CPUS_AVAIL_IN_POOL;
	}

	if (core_id < 0) {
		dev_err_ratelimited(ne_misc_dev.this_device,
				    "CPU %d is not in NE CPU pool\n", vcpu_id);

		return -NE_ERR_VCPU_NOT_IN_CPU_POOL;
	}

	if (core_id >= ne_enclave->nr_parent_vm_cores) {
		dev_err_ratelimited(ne_misc_dev.this_device,
				    "Invalid core id %d - ne_enclave\n", core_id);

		return -NE_ERR_VCPU_INVALID_CPU_CORE;
	}

	for_each_cpu(cpu, ne_cpu_pool.avail_threads_per_core[core_id])
		cpumask_set_cpu(cpu, ne_enclave->threads_per_core[core_id]);

	cpumask_clear(ne_cpu_pool.avail_threads_per_core[core_id]);

	return 0;
}

/**
 * ne_get_cpu_from_cpu_pool() - Get a CPU from the NE CPU pool, either from the
 *				remaining sibling(s) of a CPU core or the first
 *				sibling of a new CPU core.
 * @ne_enclave:	Private data associated with the current enclave.
 * @vcpu_id:	vCPU to get from the NE CPU pool.
 *
 * Context: Process context. This function is called with the ne_enclave mutex held.
 * Return:
 * * 0 on success.
 * * Negative return value on failure.
 */
static int ne_get_cpu_from_cpu_pool(struct ne_enclave *ne_enclave, u32 *vcpu_id)
{
	int core_id = -1;
	unsigned int cpu = 0;
	unsigned int i = 0;
	int rc = -EINVAL;

	/*
	 * If previously allocated a thread of a core to this enclave, first
	 * check remaining sibling(s) for new CPU allocations, so that full
	 * CPU cores are used for the enclave.
	 */
	for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++)
		for_each_cpu(cpu, ne_enclave->threads_per_core[i])
			if (!ne_donated_cpu(ne_enclave, cpu)) {
				*vcpu_id = cpu;

				return 0;
			}

	mutex_lock(&ne_cpu_pool.mutex);

	/*
	 * If no remaining siblings, get a core from the NE CPU pool and keep
	 * track of all the threads in the enclave threads per core data structure.
	 */
	core_id = ne_get_unused_core_from_cpu_pool();

	rc = ne_set_enclave_threads_per_core(ne_enclave, core_id, *vcpu_id);
	if (rc < 0)
		goto unlock_mutex;

	*vcpu_id = cpumask_any(ne_enclave->threads_per_core[core_id]);

	rc = 0;

unlock_mutex:
	mutex_unlock(&ne_cpu_pool.mutex);

	return rc;
}

/**
 * ne_get_vcpu_core_from_cpu_pool() - Get from the NE CPU pool the id of the
 *				      core associated with the provided vCPU.
 * @vcpu_id:	Provided vCPU id to get its associated core id.
 *
 * Context: Process context. This function is called with the ne_enclave and
 *	    ne_cpu_pool mutexes held.
 * Return:
 * * Core id.
 * * -1 if the provided vCPU is not in the pool.
 */
static int ne_get_vcpu_core_from_cpu_pool(u32 vcpu_id)
{
	int core_id = -1;
	unsigned int i = 0;

	for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++)
		if (cpumask_test_cpu(vcpu_id, ne_cpu_pool.avail_threads_per_core[i])) {
			core_id = i;

			break;
		}

	return core_id;
}

/**
 * ne_check_cpu_in_cpu_pool() - Check if the given vCPU is in the available CPUs
 *				from the pool.
 * @ne_enclave:	Private data associated with the current enclave.
 * @vcpu_id:	ID of the vCPU to check if available in the NE CPU pool.
 *
 * Context: Process context. This function is called with the ne_enclave mutex held.
 * Return:
 * * 0 on success.
 * * Negative return value on failure.
 */
static int ne_check_cpu_in_cpu_pool(struct ne_enclave *ne_enclave, u32 vcpu_id)
{
	int core_id = -1;
	unsigned int i = 0;
	int rc = -EINVAL;

	if (ne_donated_cpu(ne_enclave, vcpu_id)) {
		dev_err_ratelimited(ne_misc_dev.this_device,
				    "CPU %d already used\n", vcpu_id);

		return -NE_ERR_VCPU_ALREADY_USED;
	}

	/*
	 * If previously allocated a thread of a core to this enclave, but not
	 * the full core, first check remaining sibling(s).
	 */
	for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++)
		if (cpumask_test_cpu(vcpu_id, ne_enclave->threads_per_core[i]))
			return 0;

	mutex_lock(&ne_cpu_pool.mutex);

	/*
	 * If no remaining siblings, get from the NE CPU pool the core
	 * associated with the vCPU and keep track of all the threads in the
	 * enclave threads per core data structure.
	 */
	core_id = ne_get_vcpu_core_from_cpu_pool(vcpu_id);

	rc = ne_set_enclave_threads_per_core(ne_enclave, core_id, vcpu_id);
	if (rc < 0)
		goto unlock_mutex;

	rc = 0;

unlock_mutex:
	mutex_unlock(&ne_cpu_pool.mutex);

	return rc;
}

/**
 * ne_add_vcpu_ioctl() - Add a vCPU to the slot associated with the current
 *			 enclave.
 * @ne_enclave:	Private data associated with the current enclave.
 * @vcpu_id:	ID of the CPU to be associated with the given slot,
 *		apic id on x86.
 *
 * Context: Process context. This function is called with the ne_enclave mutex held.
 * Return:
 * * 0 on success.
 * * Negative return value on failure.
 */
static int ne_add_vcpu_ioctl(struct ne_enclave *ne_enclave, u32 vcpu_id)
{
	struct ne_pci_dev_cmd_reply cmd_reply = {};
	struct pci_dev *pdev = ne_devs.ne_pci_dev->pdev;
	int rc = -EINVAL;
	struct slot_add_vcpu_req slot_add_vcpu_req = {};

	if (ne_enclave->mm != current->mm)
		return -EIO;

	slot_add_vcpu_req.slot_uid = ne_enclave->slot_uid;
	slot_add_vcpu_req.vcpu_id = vcpu_id;

	rc = ne_do_request(pdev, SLOT_ADD_VCPU,
			   &slot_add_vcpu_req, sizeof(slot_add_vcpu_req),
			   &cmd_reply, sizeof(cmd_reply));
	if (rc < 0) {
		dev_err_ratelimited(ne_misc_dev.this_device,
				    "Error in slot add vCPU [rc=%d]\n", rc);

		return rc;
	}

	cpumask_set_cpu(vcpu_id, ne_enclave->vcpu_ids);

	ne_enclave->nr_vcpus++;

	return 0;
}
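
/*
 * Sketch of the matching user space call (see also the upstream sample in
 * samples/nitro_enclaves/ne_ioctl_sample.c): per the NE_ADD_VCPU handling in
 * ne_enclave_ioctl() below, passing 0 asks the driver to pick a CPU from the
 * pool, and the chosen id is written back into the same variable.
 *
 *	__u32 vcpu_id = 0;	// 0 = let the driver choose from the pool
 *
 *	if (ioctl(enclave_fd, NE_ADD_VCPU, &vcpu_id) < 0)
 *		;	// handle error, e.g. NE_ERR_NO_CPUS_AVAIL_IN_POOL
 *	// vcpu_id now holds the CPU id that was actually added
 */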

/**
 * ne_sanity_check_user_mem_region() - Sanity check the user space memory
 *				       region received during the set user
 *				       memory region ioctl call.
 * @ne_enclave:	Private data associated with the current enclave.
 * @mem_region:	User space memory region to be sanity checked.
 *
 * Context: Process context. This function is called with the ne_enclave mutex held.
 * Return:
 * * 0 on success.
 * * Negative return value on failure.
 */
static int ne_sanity_check_user_mem_region(struct ne_enclave *ne_enclave,
					   struct ne_user_memory_region mem_region)
{
	struct ne_mem_region *ne_mem_region = NULL;

	if (ne_enclave->mm != current->mm)
		return -EIO;

	if (mem_region.memory_size & (NE_MIN_MEM_REGION_SIZE - 1)) {
		dev_err_ratelimited(ne_misc_dev.this_device,
				    "User space memory size is not multiple of 2 MiB\n");

		return -NE_ERR_INVALID_MEM_REGION_SIZE;
	}

	if (!IS_ALIGNED(mem_region.userspace_addr, NE_MIN_MEM_REGION_SIZE)) {
		dev_err_ratelimited(ne_misc_dev.this_device,
				    "User space address is not 2 MiB aligned\n");

		return -NE_ERR_UNALIGNED_MEM_REGION_ADDR;
	}

	if ((mem_region.userspace_addr & (NE_MIN_MEM_REGION_SIZE - 1)) ||
	    !access_ok((void __user *)(unsigned long)mem_region.userspace_addr,
		       mem_region.memory_size)) {
		dev_err_ratelimited(ne_misc_dev.this_device,
				    "Invalid user space address range\n");

		return -NE_ERR_INVALID_MEM_REGION_ADDR;
	}

	list_for_each_entry(ne_mem_region, &ne_enclave->mem_regions_list,
			    mem_region_list_entry) {
		u64 memory_size = ne_mem_region->memory_size;
		u64 userspace_addr = ne_mem_region->userspace_addr;

		if ((userspace_addr <= mem_region.userspace_addr &&
		     mem_region.userspace_addr < (userspace_addr + memory_size)) ||
		    (mem_region.userspace_addr <= userspace_addr &&
		     (mem_region.userspace_addr + mem_region.memory_size) > userspace_addr)) {
			dev_err_ratelimited(ne_misc_dev.this_device,
					    "User space memory region already used\n");

			return -NE_ERR_MEM_REGION_ALREADY_USED;
		}
	}

	return 0;
}

/**
 * ne_sanity_check_user_mem_region_page() - Sanity check a page from the user space
 *					    memory region received during the set
 *					    user memory region ioctl call.
 * @ne_enclave:	Private data associated with the current enclave.
 * @mem_region_page: Page from the user space memory region to be sanity checked.
 *
 * Context: Process context. This function is called with the ne_enclave mutex held.
 * Return:
 * * 0 on success.
 * * Negative return value on failure.
 */
static int ne_sanity_check_user_mem_region_page(struct ne_enclave *ne_enclave,
						struct page *mem_region_page)
{
	if (!PageHuge(mem_region_page)) {
		dev_err_ratelimited(ne_misc_dev.this_device,
				    "Not a hugetlbfs page\n");

		return -NE_ERR_MEM_NOT_HUGE_PAGE;
	}

	if (page_size(mem_region_page) & (NE_MIN_MEM_REGION_SIZE - 1)) {
		dev_err_ratelimited(ne_misc_dev.this_device,
				    "Page size not multiple of 2 MiB\n");

		return -NE_ERR_INVALID_PAGE_SIZE;
	}

	if (ne_enclave->numa_node != page_to_nid(mem_region_page)) {
		dev_err_ratelimited(ne_misc_dev.this_device,
				    "Page is not from NUMA node %d\n",
				    ne_enclave->numa_node);

		return -NE_ERR_MEM_DIFFERENT_NUMA_NODE;
	}

	return 0;
}

/**
 * ne_sanity_check_phys_mem_region() - Sanity check the start address and the size
 *				       of a physical memory region.
 * @phys_mem_region_paddr:	Physical start address of the region to be sanity checked.
 * @phys_mem_region_size:	Length of the region to be sanity checked.
 *
 * Context: Process context. This function is called with the ne_enclave mutex held.
 * Return:
 * * 0 on success.
 * * Negative return value on failure.
 */
static int ne_sanity_check_phys_mem_region(u64 phys_mem_region_paddr,
					   u64 phys_mem_region_size)
{
	if (phys_mem_region_size & (NE_MIN_MEM_REGION_SIZE - 1)) {
		dev_err_ratelimited(ne_misc_dev.this_device,
				    "Physical mem region size is not multiple of 2 MiB\n");

		return -EINVAL;
	}

	if (!IS_ALIGNED(phys_mem_region_paddr, NE_MIN_MEM_REGION_SIZE)) {
		dev_err_ratelimited(ne_misc_dev.this_device,
				    "Physical mem region address is not 2 MiB aligned\n");

		return -EINVAL;
	}

	return 0;
}

/**
 * ne_merge_phys_contig_memory_regions() - Add a memory region and merge the adjacent
 *					   regions if they are physically contiguous.
 * @phys_contig_regions:	Private data associated with the contiguous physical memory regions.
 * @page_paddr:	Physical start address of the region to be added.
 * @page_size:	Length of the region to be added.
 *
 * Context: Process context. This function is called with the ne_enclave mutex held.
 * Return:
 * * 0 on success.
 * * Negative return value on failure.
 */
static int
ne_merge_phys_contig_memory_regions(struct ne_phys_contig_mem_regions *phys_contig_regions,
				    u64 page_paddr, u64 page_size)
{
	unsigned long num = phys_contig_regions->num;
	int rc = 0;

	rc = ne_sanity_check_phys_mem_region(page_paddr, page_size);
	if (rc < 0)
		return rc;

	/* Physically contiguous, just merge */
	if (num && (phys_contig_regions->regions[num - 1].end + 1) == page_paddr) {
		phys_contig_regions->regions[num - 1].end += page_size;
	} else {
		phys_contig_regions->regions[num].start = page_paddr;
		phys_contig_regions->regions[num].end = page_paddr + page_size - 1;
		phys_contig_regions->num++;
	}

	return 0;
}
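
/*
 * Worked example with 2 MiB pages: a first call for physical address 0x200000
 * creates the range [0x200000, 0x3fffff]; a second call for 0x400000 merges
 * into [0x200000, 0x5fffff] since end + 1 matches; a later call for 0xa00000
 * is not adjacent and starts a new range, leaving num == 2.
 */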

/**
 * ne_set_user_memory_region_ioctl() - Add user space memory region to the slot
 *				       associated with the current enclave.
 * @ne_enclave:	Private data associated with the current enclave.
 * @mem_region:	User space memory region to be associated with the given slot.
 *
 * Context: Process context. This function is called with the ne_enclave mutex held.
 * Return:
 * * 0 on success.
 * * Negative return value on failure.
 */
static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
					   struct ne_user_memory_region mem_region)
{
	long gup_rc = 0;
	unsigned long i = 0;
	unsigned long max_nr_pages = 0;
	unsigned long memory_size = 0;
	struct ne_mem_region *ne_mem_region = NULL;
	struct pci_dev *pdev = ne_devs.ne_pci_dev->pdev;
	struct ne_phys_contig_mem_regions phys_contig_mem_regions = {};
	int rc = -EINVAL;

	rc = ne_sanity_check_user_mem_region(ne_enclave, mem_region);
	if (rc < 0)
		return rc;

	ne_mem_region = kzalloc(sizeof(*ne_mem_region), GFP_KERNEL);
	if (!ne_mem_region)
		return -ENOMEM;

	max_nr_pages = mem_region.memory_size / NE_MIN_MEM_REGION_SIZE;

	ne_mem_region->pages = kcalloc(max_nr_pages, sizeof(*ne_mem_region->pages),
				       GFP_KERNEL);
	if (!ne_mem_region->pages) {
		rc = -ENOMEM;

		goto free_mem_region;
	}

	phys_contig_mem_regions.regions = kcalloc(max_nr_pages,
						  sizeof(*phys_contig_mem_regions.regions),
						  GFP_KERNEL);
	if (!phys_contig_mem_regions.regions) {
		rc = -ENOMEM;

		goto free_mem_region;
	}

	do {
		i = ne_mem_region->nr_pages;

		if (i == max_nr_pages) {
			dev_err_ratelimited(ne_misc_dev.this_device,
					    "Reached max nr of pages in the pages data struct\n");

			rc = -ENOMEM;

			goto put_pages;
		}

		gup_rc = get_user_pages_unlocked(mem_region.userspace_addr + memory_size, 1,
						 ne_mem_region->pages + i, FOLL_GET);

		if (gup_rc < 0) {
			rc = gup_rc;

			dev_err_ratelimited(ne_misc_dev.this_device,
					    "Error in get user pages [rc=%d]\n", rc);

			goto put_pages;
		}

		rc = ne_sanity_check_user_mem_region_page(ne_enclave, ne_mem_region->pages[i]);
		if (rc < 0)
			goto put_pages;

		rc = ne_merge_phys_contig_memory_regions(&phys_contig_mem_regions,
							 page_to_phys(ne_mem_region->pages[i]),
							 page_size(ne_mem_region->pages[i]));
		if (rc < 0)
			goto put_pages;

		memory_size += page_size(ne_mem_region->pages[i]);

		ne_mem_region->nr_pages++;
	} while (memory_size < mem_region.memory_size);

	if ((ne_enclave->nr_mem_regions + phys_contig_mem_regions.num) >
	    ne_enclave->max_mem_regions) {
		dev_err_ratelimited(ne_misc_dev.this_device,
				    "Reached max memory regions %lld\n",
				    ne_enclave->max_mem_regions);

		rc = -NE_ERR_MEM_MAX_REGIONS;

		goto put_pages;
	}

	for (i = 0; i < phys_contig_mem_regions.num; i++) {
		u64 phys_region_addr = phys_contig_mem_regions.regions[i].start;
		u64 phys_region_size = range_len(&phys_contig_mem_regions.regions[i]);

		rc = ne_sanity_check_phys_mem_region(phys_region_addr, phys_region_size);
		if (rc < 0)
			goto put_pages;
	}

	ne_mem_region->memory_size = mem_region.memory_size;
	ne_mem_region->userspace_addr = mem_region.userspace_addr;

	list_add(&ne_mem_region->mem_region_list_entry, &ne_enclave->mem_regions_list);

	for (i = 0; i < phys_contig_mem_regions.num; i++) {
		struct ne_pci_dev_cmd_reply cmd_reply = {};
		struct slot_add_mem_req slot_add_mem_req = {};

		slot_add_mem_req.slot_uid = ne_enclave->slot_uid;
		slot_add_mem_req.paddr = phys_contig_mem_regions.regions[i].start;
		slot_add_mem_req.size = range_len(&phys_contig_mem_regions.regions[i]);

		rc = ne_do_request(pdev, SLOT_ADD_MEM,
				   &slot_add_mem_req, sizeof(slot_add_mem_req),
				   &cmd_reply, sizeof(cmd_reply));
		if (rc < 0) {
			dev_err_ratelimited(ne_misc_dev.this_device,
					    "Error in slot add mem [rc=%d]\n", rc);

			kfree(phys_contig_mem_regions.regions);

			/*
			 * Exit here without put pages as memory regions may
			 * have already been added.
			 */
			return rc;
		}

		ne_enclave->mem_size += slot_add_mem_req.size;
		ne_enclave->nr_mem_regions++;
	}

	kfree(phys_contig_mem_regions.regions);

	return 0;

put_pages:
	for (i = 0; i < ne_mem_region->nr_pages; i++)
		put_page(ne_mem_region->pages[i]);
free_mem_region:
	kfree(phys_contig_mem_regions.regions);
	kfree(ne_mem_region->pages);
	kfree(ne_mem_region);

	return rc;
}
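
/*
 * Sketch of the matching user space call, assuming the backing memory is a
 * hugetlbfs mapping so the sanity checks above pass; the 2 MiB size is an
 * illustrative value:
 *
 *	#include <linux/nitro_enclaves.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *
 *	struct ne_user_memory_region mem_region = {
 *		.flags		= NE_DEFAULT_MEMORY_REGION,
 *		.memory_size	= 2 * 1024 * 1024,
 *	};
 *
 *	void *addr = mmap(NULL, mem_region.memory_size, PROT_READ | PROT_WRITE,
 *			  MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 *	mem_region.userspace_addr = (__u64)addr;
 *
 *	ioctl(enclave_fd, NE_SET_USER_MEMORY_REGION, &mem_region);
 */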

/**
 * ne_start_enclave_ioctl() - Trigger enclave start after the enclave resources,
 *			      such as memory and CPU, have been set.
 * @ne_enclave:	Private data associated with the current enclave.
 * @enclave_start_info:	Enclave info that includes enclave cid and flags.
 *
 * Context: Process context. This function is called with the ne_enclave mutex held.
 * Return:
 * * 0 on success.
 * * Negative return value on failure.
 */
static int ne_start_enclave_ioctl(struct ne_enclave *ne_enclave,
				  struct ne_enclave_start_info *enclave_start_info)
{
	struct ne_pci_dev_cmd_reply cmd_reply = {};
	unsigned int cpu = 0;
	struct enclave_start_req enclave_start_req = {};
	unsigned int i = 0;
	struct pci_dev *pdev = ne_devs.ne_pci_dev->pdev;
	int rc = -EINVAL;

	if (!ne_enclave->nr_mem_regions) {
		dev_err_ratelimited(ne_misc_dev.this_device,
				    "Enclave has no mem regions\n");

		return -NE_ERR_NO_MEM_REGIONS_ADDED;
	}

	if (ne_enclave->mem_size < NE_MIN_ENCLAVE_MEM_SIZE) {
		dev_err_ratelimited(ne_misc_dev.this_device,
				    "Enclave memory is less than %ld\n",
				    NE_MIN_ENCLAVE_MEM_SIZE);

		return -NE_ERR_ENCLAVE_MEM_MIN_SIZE;
	}

	if (!ne_enclave->nr_vcpus) {
		dev_err_ratelimited(ne_misc_dev.this_device,
				    "Enclave has no vCPUs\n");

		return -NE_ERR_NO_VCPUS_ADDED;
	}

	for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++)
		for_each_cpu(cpu, ne_enclave->threads_per_core[i])
			if (!cpumask_test_cpu(cpu, ne_enclave->vcpu_ids)) {
				dev_err_ratelimited(ne_misc_dev.this_device,
						    "Full CPU cores not used\n");

				return -NE_ERR_FULL_CORES_NOT_USED;
			}

	enclave_start_req.enclave_cid = enclave_start_info->enclave_cid;
	enclave_start_req.flags = enclave_start_info->flags;
	enclave_start_req.slot_uid = ne_enclave->slot_uid;

	rc = ne_do_request(pdev, ENCLAVE_START,
			   &enclave_start_req, sizeof(enclave_start_req),
			   &cmd_reply, sizeof(cmd_reply));
	if (rc < 0) {
		dev_err_ratelimited(ne_misc_dev.this_device,
				    "Error in enclave start [rc=%d]\n", rc);

		return rc;
	}

	ne_enclave->state = NE_STATE_RUNNING;

	enclave_start_info->enclave_cid = cmd_reply.enclave_cid;

	return 0;
}
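
/*
 * Sketch of the matching user space call: per the NE_START_ENCLAVE handling
 * below, a CID of 0 is a placeholder that lets the hypervisor auto-generate
 * the enclave CID, which is then returned in the same structure.
 *
 *	struct ne_enclave_start_info start_info = {
 *		.flags		= 0,	// e.g. production mode
 *		.enclave_cid	= 0,	// 0 = auto-generate a CID
 *	};
 *
 *	if (ioctl(enclave_fd, NE_START_ENCLAVE, &start_info) == 0)
 *		;	// start_info.enclave_cid now holds the assigned CID
 */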

/**
 * ne_enclave_ioctl() - Ioctl function provided by the enclave file.
 * @file:	File associated with this ioctl function.
 * @cmd:	The command that is set for the ioctl call.
 * @arg:	The argument that is provided for the ioctl call.
 *
 * Context: Process context.
 * Return:
 * * 0 on success.
 * * Negative return value on failure.
 */
static long ne_enclave_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ne_enclave *ne_enclave = file->private_data;

	switch (cmd) {
	case NE_ADD_VCPU: {
		int rc = -EINVAL;
		u32 vcpu_id = 0;

		if (copy_from_user(&vcpu_id, (void __user *)arg, sizeof(vcpu_id)))
			return -EFAULT;

		mutex_lock(&ne_enclave->enclave_info_mutex);

		if (ne_enclave->state != NE_STATE_INIT) {
			dev_err_ratelimited(ne_misc_dev.this_device,
					    "Enclave is not in init state\n");

			mutex_unlock(&ne_enclave->enclave_info_mutex);

			return -NE_ERR_NOT_IN_INIT_STATE;
		}

		if (vcpu_id >= (ne_enclave->nr_parent_vm_cores *
		    ne_enclave->nr_threads_per_core)) {
			dev_err_ratelimited(ne_misc_dev.this_device,
					    "vCPU id higher than max CPU id\n");

			mutex_unlock(&ne_enclave->enclave_info_mutex);

			return -NE_ERR_INVALID_VCPU;
		}

		if (!vcpu_id) {
			/* Use the CPU pool for choosing a CPU for the enclave. */
			rc = ne_get_cpu_from_cpu_pool(ne_enclave, &vcpu_id);
			if (rc < 0) {
				dev_err_ratelimited(ne_misc_dev.this_device,
						    "Error in get CPU from pool [rc=%d]\n",
						    rc);

				mutex_unlock(&ne_enclave->enclave_info_mutex);

				return rc;
			}
		} else {
			/* Check if the provided vCPU is available in the NE CPU pool. */
			rc = ne_check_cpu_in_cpu_pool(ne_enclave, vcpu_id);
			if (rc < 0) {
				dev_err_ratelimited(ne_misc_dev.this_device,
						    "Error in check CPU %d in pool [rc=%d]\n",
						    vcpu_id, rc);

				mutex_unlock(&ne_enclave->enclave_info_mutex);

				return rc;
			}
		}

		rc = ne_add_vcpu_ioctl(ne_enclave, vcpu_id);
		if (rc < 0) {
			mutex_unlock(&ne_enclave->enclave_info_mutex);

			return rc;
		}

		mutex_unlock(&ne_enclave->enclave_info_mutex);

		if (copy_to_user((void __user *)arg, &vcpu_id, sizeof(vcpu_id)))
			return -EFAULT;

		return 0;
	}

	case NE_GET_IMAGE_LOAD_INFO: {
		struct ne_image_load_info image_load_info = {};

		if (copy_from_user(&image_load_info, (void __user *)arg, sizeof(image_load_info)))
			return -EFAULT;

		mutex_lock(&ne_enclave->enclave_info_mutex);

		if (ne_enclave->state != NE_STATE_INIT) {
			dev_err_ratelimited(ne_misc_dev.this_device,
					    "Enclave is not in init state\n");

			mutex_unlock(&ne_enclave->enclave_info_mutex);

			return -NE_ERR_NOT_IN_INIT_STATE;
		}

		mutex_unlock(&ne_enclave->enclave_info_mutex);

		if (!image_load_info.flags ||
		    image_load_info.flags >= NE_IMAGE_LOAD_MAX_FLAG_VAL) {
			dev_err_ratelimited(ne_misc_dev.this_device,
					    "Incorrect flag in enclave image load info\n");

			return -NE_ERR_INVALID_FLAG_VALUE;
		}

		if (image_load_info.flags == NE_EIF_IMAGE)
			image_load_info.memory_offset = NE_EIF_LOAD_OFFSET;

		if (copy_to_user((void __user *)arg, &image_load_info, sizeof(image_load_info)))
			return -EFAULT;

		return 0;
	}

	case NE_SET_USER_MEMORY_REGION: {
		struct ne_user_memory_region mem_region = {};
		int rc = -EINVAL;

		if (copy_from_user(&mem_region, (void __user *)arg, sizeof(mem_region)))
			return -EFAULT;

		if (mem_region.flags >= NE_MEMORY_REGION_MAX_FLAG_VAL) {
			dev_err_ratelimited(ne_misc_dev.this_device,
					    "Incorrect flag for user memory region\n");

			return -NE_ERR_INVALID_FLAG_VALUE;
		}

		mutex_lock(&ne_enclave->enclave_info_mutex);

		if (ne_enclave->state != NE_STATE_INIT) {
			dev_err_ratelimited(ne_misc_dev.this_device,
					    "Enclave is not in init state\n");

			mutex_unlock(&ne_enclave->enclave_info_mutex);

			return -NE_ERR_NOT_IN_INIT_STATE;
		}

		rc = ne_set_user_memory_region_ioctl(ne_enclave, mem_region);
		if (rc < 0) {
			mutex_unlock(&ne_enclave->enclave_info_mutex);

			return rc;
		}

		mutex_unlock(&ne_enclave->enclave_info_mutex);

		return 0;
	}

	case NE_START_ENCLAVE: {
		struct ne_enclave_start_info enclave_start_info = {};
		int rc = -EINVAL;

		if (copy_from_user(&enclave_start_info, (void __user *)arg,
				   sizeof(enclave_start_info)))
			return -EFAULT;

		if (enclave_start_info.flags >= NE_ENCLAVE_START_MAX_FLAG_VAL) {
			dev_err_ratelimited(ne_misc_dev.this_device,
					    "Incorrect flag in enclave start info\n");

			return -NE_ERR_INVALID_FLAG_VALUE;
		}

		/*
		 * Do not use well-known CIDs - 0, 1, 2 - for enclaves.
		 * VMADDR_CID_ANY = -1U
		 * VMADDR_CID_HYPERVISOR = 0
		 * VMADDR_CID_LOCAL = 1
		 * VMADDR_CID_HOST = 2
		 * Note: 0 is used as a placeholder to auto-generate an enclave CID.
		 * http://man7.org/linux/man-pages/man7/vsock.7.html
		 */
		if (enclave_start_info.enclave_cid > 0 &&
		    enclave_start_info.enclave_cid <= VMADDR_CID_HOST) {
			dev_err_ratelimited(ne_misc_dev.this_device,
					    "Well-known CID value, not to be used for enclaves\n");

			return -NE_ERR_INVALID_ENCLAVE_CID;
		}

		if (enclave_start_info.enclave_cid == U32_MAX) {
			dev_err_ratelimited(ne_misc_dev.this_device,
					    "Well-known CID value, not to be used for enclaves\n");

			return -NE_ERR_INVALID_ENCLAVE_CID;
		}

		/*
		 * Do not use the CID of the primary / parent VM for enclaves.
		 */
		if (enclave_start_info.enclave_cid == NE_PARENT_VM_CID) {
			dev_err_ratelimited(ne_misc_dev.this_device,
					    "CID of the parent VM, not to be used for enclaves\n");

			return -NE_ERR_INVALID_ENCLAVE_CID;
		}

		/* 64-bit CIDs are not yet supported for the vsock device. */
		if (enclave_start_info.enclave_cid > U32_MAX) {
			dev_err_ratelimited(ne_misc_dev.this_device,
					    "64-bit CIDs not yet supported for the vsock device\n");

			return -NE_ERR_INVALID_ENCLAVE_CID;
		}

		mutex_lock(&ne_enclave->enclave_info_mutex);

		if (ne_enclave->state != NE_STATE_INIT) {
			dev_err_ratelimited(ne_misc_dev.this_device,
					    "Enclave is not in init state\n");

			mutex_unlock(&ne_enclave->enclave_info_mutex);

			return -NE_ERR_NOT_IN_INIT_STATE;
		}

		rc = ne_start_enclave_ioctl(ne_enclave, &enclave_start_info);
		if (rc < 0) {
			mutex_unlock(&ne_enclave->enclave_info_mutex);

			return rc;
		}

		mutex_unlock(&ne_enclave->enclave_info_mutex);

		if (copy_to_user((void __user *)arg, &enclave_start_info,
				 sizeof(enclave_start_info)))
			return -EFAULT;

		return 0;
	}

	default:
		return -ENOTTY;
	}

	return 0;
}

/**
 * ne_enclave_remove_all_mem_region_entries() - Remove all memory region entries
 *						from the enclave data structure.
 * @ne_enclave:	Private data associated with the current enclave.
 *
 * Context: Process context. This function is called with the ne_enclave mutex held.
 */
static void ne_enclave_remove_all_mem_region_entries(struct ne_enclave *ne_enclave)
{
	unsigned long i = 0;
	struct ne_mem_region *ne_mem_region = NULL;
	struct ne_mem_region *ne_mem_region_tmp = NULL;

	list_for_each_entry_safe(ne_mem_region, ne_mem_region_tmp,
				 &ne_enclave->mem_regions_list,
				 mem_region_list_entry) {
		list_del(&ne_mem_region->mem_region_list_entry);

		for (i = 0; i < ne_mem_region->nr_pages; i++)
			put_page(ne_mem_region->pages[i]);

		kfree(ne_mem_region->pages);

		kfree(ne_mem_region);
	}
}

/**
 * ne_enclave_remove_all_vcpu_id_entries() - Remove all vCPU id entries from
 *					     the enclave data structure.
 * @ne_enclave:	Private data associated with the current enclave.
 *
 * Context: Process context. This function is called with the ne_enclave mutex held.
 */
static void ne_enclave_remove_all_vcpu_id_entries(struct ne_enclave *ne_enclave)
{
	unsigned int cpu = 0;
	unsigned int i = 0;

	mutex_lock(&ne_cpu_pool.mutex);

	for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++) {
		for_each_cpu(cpu, ne_enclave->threads_per_core[i])
			/* Update the available NE CPU pool. */
			cpumask_set_cpu(cpu, ne_cpu_pool.avail_threads_per_core[i]);

		free_cpumask_var(ne_enclave->threads_per_core[i]);
	}

	mutex_unlock(&ne_cpu_pool.mutex);

	kfree(ne_enclave->threads_per_core);

	free_cpumask_var(ne_enclave->vcpu_ids);
}

/**
 * ne_pci_dev_remove_enclave_entry() - Remove the enclave entry from the data
 *				       structure that is part of the NE PCI
 *				       device private data.
 * @ne_enclave:	Private data associated with the current enclave.
 * @ne_pci_dev:	Private data associated with the PCI device.
 *
 * Context: Process context. This function is called with the ne_pci_dev enclave
 *	    mutex held.
 */
static void ne_pci_dev_remove_enclave_entry(struct ne_enclave *ne_enclave,
					    struct ne_pci_dev *ne_pci_dev)
{
	struct ne_enclave *ne_enclave_entry = NULL;
	struct ne_enclave *ne_enclave_entry_tmp = NULL;

	list_for_each_entry_safe(ne_enclave_entry, ne_enclave_entry_tmp,
				 &ne_pci_dev->enclaves_list, enclave_list_entry) {
		if (ne_enclave_entry->slot_uid == ne_enclave->slot_uid) {
			list_del(&ne_enclave_entry->enclave_list_entry);

			break;
		}
	}
}

/**
 * ne_enclave_release() - Release function provided by the enclave file.
 * @inode:	Inode associated with this file release function.
 * @file:	File associated with this release function.
 *
 * Context: Process context.
 * Return:
 * * 0 on success.
 * * Negative return value on failure.
 */
static int ne_enclave_release(struct inode *inode, struct file *file)
{
	struct ne_pci_dev_cmd_reply cmd_reply = {};
	struct enclave_stop_req enclave_stop_request = {};
	struct ne_enclave *ne_enclave = file->private_data;
	struct ne_pci_dev *ne_pci_dev = ne_devs.ne_pci_dev;
	struct pci_dev *pdev = ne_pci_dev->pdev;
	int rc = -EINVAL;
	struct slot_free_req slot_free_req = {};

	if (!ne_enclave)
		return 0;

	/*
	 * Early exit in case there is an error in the enclave creation logic
	 * and fput() is called on the cleanup path.
	 */
	if (!ne_enclave->slot_uid)
		return 0;

	/*
	 * Acquire the enclave list mutex before the enclave mutex
	 * in order to avoid deadlocks with @ref ne_event_work_handler.
	 */
	mutex_lock(&ne_pci_dev->enclaves_list_mutex);
	mutex_lock(&ne_enclave->enclave_info_mutex);

	if (ne_enclave->state != NE_STATE_INIT && ne_enclave->state != NE_STATE_STOPPED) {
		enclave_stop_request.slot_uid = ne_enclave->slot_uid;

		rc = ne_do_request(pdev, ENCLAVE_STOP,
				   &enclave_stop_request, sizeof(enclave_stop_request),
				   &cmd_reply, sizeof(cmd_reply));
		if (rc < 0) {
			dev_err_ratelimited(ne_misc_dev.this_device,
					    "Error in enclave stop [rc=%d]\n", rc);

			goto unlock_mutex;
		}

		memset(&cmd_reply, 0, sizeof(cmd_reply));
	}

	slot_free_req.slot_uid = ne_enclave->slot_uid;

	rc = ne_do_request(pdev, SLOT_FREE,
			   &slot_free_req, sizeof(slot_free_req),
			   &cmd_reply, sizeof(cmd_reply));
	if (rc < 0) {
		dev_err_ratelimited(ne_misc_dev.this_device,
				    "Error in slot free [rc=%d]\n", rc);

		goto unlock_mutex;
	}

	ne_pci_dev_remove_enclave_entry(ne_enclave, ne_pci_dev);
	ne_enclave_remove_all_mem_region_entries(ne_enclave);
	ne_enclave_remove_all_vcpu_id_entries(ne_enclave);

	mutex_unlock(&ne_enclave->enclave_info_mutex);
	mutex_unlock(&ne_pci_dev->enclaves_list_mutex);

	kfree(ne_enclave);

	return 0;

unlock_mutex:
	mutex_unlock(&ne_enclave->enclave_info_mutex);
	mutex_unlock(&ne_pci_dev->enclaves_list_mutex);

	return rc;
}

/**
 * ne_enclave_poll() - Poll functionality used for enclave out-of-band events.
 * @file:	File associated with this poll function.
 * @wait:	Poll table data structure.
 *
 * Context: Process context.
 * Return:
 * * Poll mask.
 */
static __poll_t ne_enclave_poll(struct file *file, poll_table *wait)
{
	__poll_t mask = 0;
	struct ne_enclave *ne_enclave = file->private_data;

	poll_wait(file, &ne_enclave->eventq, wait);

	if (ne_enclave->has_event)
		mask |= EPOLLHUP;

	return mask;
}
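
/*
 * Sketch of how user space can watch for the out-of-band event: it surfaces
 * as (E)POLLHUP on the enclave fd, which poll(2) reports even with no events
 * requested.
 *
 *	struct pollfd pfd = { .fd = enclave_fd, .events = 0 };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLHUP))
 *		;	// an out-of-band enclave event occurred
 */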

static const struct file_operations ne_enclave_fops = {
	.owner		= THIS_MODULE,
	.llseek		= noop_llseek,
	.poll		= ne_enclave_poll,
	.unlocked_ioctl	= ne_enclave_ioctl,
	.release	= ne_enclave_release,
};

/**
 * ne_create_vm_ioctl() - Alloc slot to be associated with an enclave. Create
 *			  enclave file descriptor to be further used for enclave
 *			  resources handling e.g. memory regions and CPUs.
 * @ne_pci_dev:	Private data associated with the PCI device.
 * @slot_uid:	User pointer to store the generated unique slot id
 *		associated with an enclave to.
 *
 * Context: Process context. This function is called with the ne_pci_dev enclave
 *	    mutex held.
 * Return:
 * * Enclave fd on success.
 * * Negative return value on failure.
 */
static int ne_create_vm_ioctl(struct ne_pci_dev *ne_pci_dev, u64 __user *slot_uid)
{
	struct ne_pci_dev_cmd_reply cmd_reply = {};
	int enclave_fd = -1;
	struct file *enclave_file = NULL;
	unsigned int i = 0;
	struct ne_enclave *ne_enclave = NULL;
	struct pci_dev *pdev = ne_pci_dev->pdev;
	int rc = -EINVAL;
	struct slot_alloc_req slot_alloc_req = {};

	mutex_lock(&ne_cpu_pool.mutex);

	for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++)
		if (!cpumask_empty(ne_cpu_pool.avail_threads_per_core[i]))
			break;

	if (i == ne_cpu_pool.nr_parent_vm_cores) {
		dev_err_ratelimited(ne_misc_dev.this_device,
				    "No CPUs available in CPU pool\n");

		mutex_unlock(&ne_cpu_pool.mutex);

		return -NE_ERR_NO_CPUS_AVAIL_IN_POOL;
	}

	mutex_unlock(&ne_cpu_pool.mutex);

	ne_enclave = kzalloc(sizeof(*ne_enclave), GFP_KERNEL);
	if (!ne_enclave)
		return -ENOMEM;

	mutex_lock(&ne_cpu_pool.mutex);

	ne_enclave->nr_parent_vm_cores = ne_cpu_pool.nr_parent_vm_cores;
	ne_enclave->nr_threads_per_core = ne_cpu_pool.nr_threads_per_core;
	ne_enclave->numa_node = ne_cpu_pool.numa_node;

	mutex_unlock(&ne_cpu_pool.mutex);

	ne_enclave->threads_per_core = kcalloc(ne_enclave->nr_parent_vm_cores,
					       sizeof(*ne_enclave->threads_per_core),
					       GFP_KERNEL);
	if (!ne_enclave->threads_per_core) {
		rc = -ENOMEM;

		goto free_ne_enclave;
	}

	for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++)
		if (!zalloc_cpumask_var(&ne_enclave->threads_per_core[i], GFP_KERNEL)) {
			rc = -ENOMEM;

			goto free_cpumask;
		}

	if (!zalloc_cpumask_var(&ne_enclave->vcpu_ids, GFP_KERNEL)) {
		rc = -ENOMEM;

		goto free_cpumask;
	}

	enclave_fd = get_unused_fd_flags(O_CLOEXEC);
	if (enclave_fd < 0) {
		rc = enclave_fd;

		dev_err_ratelimited(ne_misc_dev.this_device,
				    "Error in getting unused fd [rc=%d]\n", rc);

		goto free_cpumask;
	}

	enclave_file = anon_inode_getfile("ne-vm", &ne_enclave_fops, ne_enclave, O_RDWR);
	if (IS_ERR(enclave_file)) {
		rc = PTR_ERR(enclave_file);

		dev_err_ratelimited(ne_misc_dev.this_device,
				    "Error in anon inode get file [rc=%d]\n", rc);

		goto put_fd;
	}

	rc = ne_do_request(pdev, SLOT_ALLOC,
			   &slot_alloc_req, sizeof(slot_alloc_req),
			   &cmd_reply, sizeof(cmd_reply));
	if (rc < 0) {
		dev_err_ratelimited(ne_misc_dev.this_device,
				    "Error in slot alloc [rc=%d]\n", rc);

		goto put_file;
	}

	init_waitqueue_head(&ne_enclave->eventq);
	ne_enclave->has_event = false;
	mutex_init(&ne_enclave->enclave_info_mutex);
	ne_enclave->max_mem_regions = cmd_reply.mem_regions;
	INIT_LIST_HEAD(&ne_enclave->mem_regions_list);
	ne_enclave->mm = current->mm;
	ne_enclave->slot_uid = cmd_reply.slot_uid;
	ne_enclave->state = NE_STATE_INIT;

	list_add(&ne_enclave->enclave_list_entry, &ne_pci_dev->enclaves_list);

	if (copy_to_user(slot_uid, &ne_enclave->slot_uid, sizeof(ne_enclave->slot_uid))) {
		/*
		 * As we're holding the only reference to 'enclave_file', fput()
		 * will call ne_enclave_release() which will do a proper cleanup
		 * of all so far allocated resources, leaving only the unused fd
		 * for us to free.
		 */
		fput(enclave_file);
		put_unused_fd(enclave_fd);

		return -EFAULT;
	}

	fd_install(enclave_fd, enclave_file);

	return enclave_fd;

put_file:
	fput(enclave_file);
put_fd:
	put_unused_fd(enclave_fd);
free_cpumask:
	free_cpumask_var(ne_enclave->vcpu_ids);
	for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++)
		free_cpumask_var(ne_enclave->threads_per_core[i]);
	kfree(ne_enclave->threads_per_core);
free_ne_enclave:
	kfree(ne_enclave);

	return rc;
}
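
/*
 * Sketch of the matching user space call: the ioctl on the misc device
 * returns the enclave fd and writes the slot unique id through the provided
 * pointer.
 *
 *	int ne_dev_fd = open("/dev/nitro_enclaves", O_RDWR | O_CLOEXEC);
 *	__u64 slot_uid = 0;
 *
 *	int enclave_fd = ioctl(ne_dev_fd, NE_CREATE_VM, &slot_uid);
 *	if (enclave_fd < 0)
 *		;	// handle error, e.g. NE_ERR_NO_CPUS_AVAIL_IN_POOL
 */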

/**
 * ne_ioctl() - Ioctl function provided by the NE misc device.
 * @file:	File associated with this ioctl function.
 * @cmd:	The command that is set for the ioctl call.
 * @arg:	The argument that is provided for the ioctl call.
 *
 * Context: Process context.
 * Return:
 * * Ioctl result (e.g. enclave file descriptor) on success.
 * * Negative return value on failure.
 */
static long ne_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case NE_CREATE_VM: {
		int enclave_fd = -1;
		struct ne_pci_dev *ne_pci_dev = ne_devs.ne_pci_dev;
		u64 __user *slot_uid = (void __user *)arg;

		mutex_lock(&ne_pci_dev->enclaves_list_mutex);
		enclave_fd = ne_create_vm_ioctl(ne_pci_dev, slot_uid);
		mutex_unlock(&ne_pci_dev->enclaves_list_mutex);

		return enclave_fd;
	}

	default:
		return -ENOTTY;
	}

	return 0;
}

#if defined(CONFIG_NITRO_ENCLAVES_MISC_DEV_TEST)
#include "ne_misc_dev_test.c"
#endif

static int __init ne_init(void)
{
	mutex_init(&ne_cpu_pool.mutex);

	return pci_register_driver(&ne_pci_driver);
}

static void __exit ne_exit(void)
{
	pci_unregister_driver(&ne_pci_driver);

	ne_teardown_cpu_pool();
}

module_init(ne_init);
module_exit(ne_exit);

MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION("Nitro Enclaves Driver");
MODULE_LICENSE("GPL v2");