Lines matching refs: ne_enclave (Nitro Enclaves misc device driver, drivers/virt/nitro_enclaves/ne_misc_dev.c)

480 static bool ne_donated_cpu(struct ne_enclave *ne_enclave, unsigned int cpu)  in ne_donated_cpu()  argument
482 if (cpumask_test_cpu(cpu, ne_enclave->vcpu_ids)) in ne_donated_cpu()
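The listing shows the whole predicate at lines 480-482: a CPU counts as donated once it is set in the enclave's vcpu_ids cpumask. A minimal reconstruction, with only the two return paths assumed:

static bool ne_donated_cpu(struct ne_enclave *ne_enclave, unsigned int cpu)
{
        /* Line 482 from the listing; the returns are assumed. */
        if (cpumask_test_cpu(cpu, ne_enclave->vcpu_ids))
                return true;

        return false;
}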
527 static int ne_set_enclave_threads_per_core(struct ne_enclave *ne_enclave, in ne_set_enclave_threads_per_core() argument
546 if (core_id >= ne_enclave->nr_parent_vm_cores) { in ne_set_enclave_threads_per_core()
554 cpumask_set_cpu(cpu, ne_enclave->threads_per_core[core_id]); in ne_set_enclave_threads_per_core()
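Lines 527-554 show this helper rejecting an out-of-range core id and then populating the enclave's threads_per_core[core_id] mask. A hedged sketch of how those two statements connect; the ne_cpu_pool global, its avail_threads_per_core masks and the pool-clearing step are assumptions about the elided body:

static int ne_set_enclave_threads_per_core(struct ne_enclave *ne_enclave,
                                           int core_id, u32 vcpu_id)
{
        unsigned int cpu = 0;

        /* Bounds check from line 546; the negative case is assumed. */
        if (core_id < 0 || core_id >= ne_enclave->nr_parent_vm_cores)
                return -NE_ERR_VCPU_NOT_IN_CPU_POOL;

        /*
         * Claim every sibling thread of the chosen core for the enclave
         * (line 554) and take the core out of the shared pool (assumed).
         */
        for_each_cpu(cpu, ne_cpu_pool.avail_threads_per_core[core_id])
                cpumask_set_cpu(cpu, ne_enclave->threads_per_core[core_id]);

        cpumask_clear(ne_cpu_pool.avail_threads_per_core[core_id]);

        return 0;
}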
573 static int ne_get_cpu_from_cpu_pool(struct ne_enclave *ne_enclave, u32 *vcpu_id) in ne_get_cpu_from_cpu_pool() argument
585 for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++) in ne_get_cpu_from_cpu_pool()
586 for_each_cpu(cpu, ne_enclave->threads_per_core[i]) in ne_get_cpu_from_cpu_pool()
587 if (!ne_donated_cpu(ne_enclave, cpu)) { in ne_get_cpu_from_cpu_pool()
601 rc = ne_set_enclave_threads_per_core(ne_enclave, core_id, *vcpu_id); in ne_get_cpu_from_cpu_pool()
605 *vcpu_id = cpumask_any(ne_enclave->threads_per_core[core_id]); in ne_get_cpu_from_cpu_pool()
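Lines 573-605 implement the "driver picks a CPU" path used when userspace passes vcpu_id 0. A sketch; the sibling-reuse loop (585-587) and the calls at 601 and 605 are from the listing, while the pool locking and the free-core search are assumptions:

static int ne_get_cpu_from_cpu_pool(struct ne_enclave *ne_enclave, u32 *vcpu_id)
{
        int core_id = -1;
        unsigned int cpu = 0;
        unsigned int i = 0;
        int rc = -EINVAL;

        /* Prefer an unused sibling on a core the enclave already owns. */
        for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++)
                for_each_cpu(cpu, ne_enclave->threads_per_core[i])
                        if (!ne_donated_cpu(ne_enclave, cpu)) {
                                *vcpu_id = cpu;
                                return 0;
                        }

        mutex_lock(&ne_cpu_pool.mutex);

        /* Otherwise find a core with available threads in the pool (assumed). */
        for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++)
                if (!cpumask_empty(ne_cpu_pool.avail_threads_per_core[i])) {
                        core_id = i;
                        break;
                }

        /* Claim the whole core (line 601), then pick any of its threads (605). */
        rc = ne_set_enclave_threads_per_core(ne_enclave, core_id, *vcpu_id);
        if (!rc)
                *vcpu_id = cpumask_any(ne_enclave->threads_per_core[core_id]);

        mutex_unlock(&ne_cpu_pool.mutex);

        return rc;
}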
652 static int ne_check_cpu_in_cpu_pool(struct ne_enclave *ne_enclave, u32 vcpu_id) in ne_check_cpu_in_cpu_pool() argument
658 if (ne_donated_cpu(ne_enclave, vcpu_id)) { in ne_check_cpu_in_cpu_pool()
669 for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++) in ne_check_cpu_in_cpu_pool()
670 if (cpumask_test_cpu(vcpu_id, ne_enclave->threads_per_core[i])) in ne_check_cpu_in_cpu_pool()
682 rc = ne_set_enclave_threads_per_core(ne_enclave, core_id, vcpu_id); in ne_check_cpu_in_cpu_pool()
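Lines 652-682 are the dual path for an explicit, caller-chosen vcpu_id: reject a CPU already donated, accept one on a core the enclave already owns, otherwise try to claim its core from the pool. A sketch, with the pool search and locking assumed:

static int ne_check_cpu_in_cpu_pool(struct ne_enclave *ne_enclave, u32 vcpu_id)
{
        int core_id = -1;
        unsigned int i = 0;
        int rc = -EINVAL;

        /* Already donated to this enclave (line 658). */
        if (ne_donated_cpu(ne_enclave, vcpu_id))
                return -NE_ERR_VCPU_ALREADY_USED;

        /* On a core the enclave already claimed (669-670): nothing to do. */
        for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++)
                if (cpumask_test_cpu(vcpu_id, ne_enclave->threads_per_core[i]))
                        return 0;

        mutex_lock(&ne_cpu_pool.mutex);

        /* Locate the CPU's core in the shared pool (assumed). */
        for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++)
                if (cpumask_test_cpu(vcpu_id, ne_cpu_pool.avail_threads_per_core[i])) {
                        core_id = i;
                        break;
                }

        /* Claim the whole core, or fail if the CPU was never poolable (682). */
        rc = ne_set_enclave_threads_per_core(ne_enclave, core_id, vcpu_id);

        mutex_unlock(&ne_cpu_pool.mutex);

        return rc;
}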
706 static int ne_add_vcpu_ioctl(struct ne_enclave *ne_enclave, u32 vcpu_id) in ne_add_vcpu_ioctl() argument
713 if (ne_enclave->mm != current->mm) in ne_add_vcpu_ioctl()
716 slot_add_vcpu_req.slot_uid = ne_enclave->slot_uid; in ne_add_vcpu_ioctl()
729 cpumask_set_cpu(vcpu_id, ne_enclave->vcpu_ids); in ne_add_vcpu_ioctl()
731 ne_enclave->nr_vcpus++; in ne_add_vcpu_ioctl()
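Lines 706-731 bracket the request that actually plumbs the vCPU into the slot. A sketch; the mm check (713), the slot_uid assignment (716) and the final bookkeeping (729-731) are from the listing, while the slot_add_vcpu_req layout and the ne_do_request() signature are assumed from the driver's PCI command interface:

static int ne_add_vcpu_ioctl(struct ne_enclave *ne_enclave, u32 vcpu_id)
{
        struct ne_pci_dev_cmd_reply cmd_reply = {};
        struct slot_add_vcpu_req slot_add_vcpu_req = {};
        int rc = -EINVAL;

        /* Only the process that created the enclave may add vCPUs. */
        if (ne_enclave->mm != current->mm)
                return -EIO;

        slot_add_vcpu_req.slot_uid = ne_enclave->slot_uid;
        slot_add_vcpu_req.vcpu_id = vcpu_id;

        rc = ne_do_request(ne_enclave->pdev, SLOT_ADD_VCPU, &slot_add_vcpu_req,
                           sizeof(slot_add_vcpu_req), &cmd_reply, sizeof(cmd_reply));
        if (rc < 0)
                return rc;

        /* Record the donation only after the device accepted it (729-731). */
        cpumask_set_cpu(vcpu_id, ne_enclave->vcpu_ids);
        ne_enclave->nr_vcpus++;

        return 0;
}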
748 static int ne_sanity_check_user_mem_region(struct ne_enclave *ne_enclave, in ne_sanity_check_user_mem_region() argument
753 if (ne_enclave->mm != current->mm) in ne_sanity_check_user_mem_region()
779 list_for_each_entry(ne_mem_region, &ne_enclave->mem_regions_list, in ne_sanity_check_user_mem_region()
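Lines 748-779 show two of the admission checks for a user memory region: it must come from the enclave owner's address space (753), and it must not overlap a region already attached (the list walk at 779). A sketch; the alignment constant and the exact overlap arithmetic are assumptions:

static int ne_sanity_check_user_mem_region(struct ne_enclave *ne_enclave,
                                           struct ne_user_memory_region mem_region)
{
        struct ne_mem_region *ne_mem_region = NULL;

        if (ne_enclave->mm != current->mm)
                return -EIO;

        /* 2 MiB alignment via NE_MIN_MEM_REGION_SIZE is assumed here. */
        if (!IS_ALIGNED(mem_region.userspace_addr, NE_MIN_MEM_REGION_SIZE) ||
            !IS_ALIGNED(mem_region.memory_size, NE_MIN_MEM_REGION_SIZE))
                return -NE_ERR_UNALIGNED_MEM_REGION_ADDR;

        /* Reject regions overlapping one already attached to the slot. */
        list_for_each_entry(ne_mem_region, &ne_enclave->mem_regions_list,
                            mem_region_list_entry) {
                u64 start = ne_mem_region->memory_region.userspace_addr;
                u64 end = start + ne_mem_region->memory_region.memory_size;

                if (mem_region.userspace_addr < end &&
                    mem_region.userspace_addr + mem_region.memory_size > start)
                        return -NE_ERR_MEM_REGION_ALREADY_USED;
        }

        return 0;
}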
810 static int ne_sanity_check_user_mem_region_page(struct ne_enclave *ne_enclave, in ne_sanity_check_user_mem_region_page() argument
827 if (ne_enclave->numa_node != page_to_nid(mem_region_page)) { in ne_sanity_check_user_mem_region_page()
830 ne_enclave->numa_node); in ne_sanity_check_user_mem_region_page()
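Line 827 pins down the NUMA policy: every page backing the enclave must live on the same node as the enclave's donated CPUs. A sketch; the huge-page check is an assumption based on the driver's hugetlbfs requirement, while the NUMA comparison and the log message shape (827-830) are from the listing:

static int ne_sanity_check_user_mem_region_page(struct ne_enclave *ne_enclave,
                                                struct page *mem_region_page)
{
        /* Enclave memory must be backed by huge pages (assumed check). */
        if (!PageHuge(mem_region_page))
                return -NE_ERR_MEM_NOT_HUGE_PAGE;

        if (ne_enclave->numa_node != page_to_nid(mem_region_page)) {
                dev_err_ratelimited(ne_misc_dev.this_device,
                                    "Page is not from NUMA node %d\n",
                                    ne_enclave->numa_node);

                return -NE_ERR_MEM_DIFFERENT_NUMA_NODE;
        }

        return 0;
}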
915 static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave, in ne_set_user_memory_region_ioctl() argument
927 rc = ne_sanity_check_user_mem_region(ne_enclave, mem_region); in ne_set_user_memory_region_ioctl()
978 rc = ne_sanity_check_user_mem_region_page(ne_enclave, ne_mem_region->pages[i]); in ne_set_user_memory_region_ioctl()
993 if ((ne_enclave->nr_mem_regions + phys_contig_mem_regions.num) > in ne_set_user_memory_region_ioctl()
994 ne_enclave->max_mem_regions) { in ne_set_user_memory_region_ioctl()
997 ne_enclave->max_mem_regions); in ne_set_user_memory_region_ioctl()
1016 list_add(&ne_mem_region->mem_region_list_entry, &ne_enclave->mem_regions_list); in ne_set_user_memory_region_ioctl()
1022 slot_add_mem_req.slot_uid = ne_enclave->slot_uid; in ne_set_user_memory_region_ioctl()
1042 ne_enclave->mem_size += slot_add_mem_req.size; in ne_set_user_memory_region_ioctl()
1043 ne_enclave->nr_mem_regions++; in ne_set_user_memory_region_ioctl()
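Lines 915-1043 trace the memory attach path: sanity-check the region, pin and validate each huge page, enforce the per-slot region ceiling, then hand each physically contiguous chunk to the device. A condensed sketch; the page pinning, the contiguity merge and the request layout are assumptions, while the calls and accounting at 927, 978, 993-997, 1016 and 1042-1043 are from the listing:

static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
                                           struct ne_user_memory_region mem_region)
{
        struct ne_pci_dev_cmd_reply cmd_reply = {};
        struct slot_add_mem_req slot_add_mem_req = {};
        struct ne_phys_contig_mem_regions phys_contig_mem_regions = {};
        struct ne_mem_region *ne_mem_region = NULL;
        unsigned long nr_pages = 0;        /* filled while pinning (assumed) */
        unsigned long i = 0;
        int rc = -EINVAL;

        rc = ne_sanity_check_user_mem_region(ne_enclave, mem_region);
        if (rc < 0)
                return rc;

        /* Pin the user huge pages (assumed), then validate each one (978). */
        for (i = 0; i < nr_pages; i++) {
                rc = ne_sanity_check_user_mem_region_page(ne_enclave,
                                                          ne_mem_region->pages[i]);
                if (rc < 0)
                        goto put_pages;
                /* Physically contiguous pages get merged into one chunk. */
        }

        /* Per-slot ceiling from lines 993-997. */
        if ((ne_enclave->nr_mem_regions + phys_contig_mem_regions.num) >
            ne_enclave->max_mem_regions) {
                rc = -NE_ERR_MEM_MAX_REGIONS;
                goto put_pages;
        }

        list_add(&ne_mem_region->mem_region_list_entry,
                 &ne_enclave->mem_regions_list);

        /* One SLOT_ADD_MEM request per contiguous chunk (shape assumed). */
        for (i = 0; i < phys_contig_mem_regions.num; i++) {
                struct range *r = &phys_contig_mem_regions.regions[i];

                slot_add_mem_req.slot_uid = ne_enclave->slot_uid;
                slot_add_mem_req.paddr = r->start;
                slot_add_mem_req.size = range_len(r);

                rc = ne_do_request(ne_enclave->pdev, SLOT_ADD_MEM,
                                   &slot_add_mem_req, sizeof(slot_add_mem_req),
                                   &cmd_reply, sizeof(cmd_reply));
                if (rc < 0)
                        goto put_pages;

                ne_enclave->mem_size += slot_add_mem_req.size;
                ne_enclave->nr_mem_regions++;
        }

        return 0;

put_pages:
        /* Unpin the pages and free the region bookkeeping (assumed). */
        return rc;
}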
1072 static int ne_start_enclave_ioctl(struct ne_enclave *ne_enclave, in ne_start_enclave_ioctl() argument
1082 if (!ne_enclave->nr_mem_regions) { in ne_start_enclave_ioctl()
1089 if (ne_enclave->mem_size < NE_MIN_ENCLAVE_MEM_SIZE) { in ne_start_enclave_ioctl()
1097 if (!ne_enclave->nr_vcpus) { in ne_start_enclave_ioctl()
1104 for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++) in ne_start_enclave_ioctl()
1105 for_each_cpu(cpu, ne_enclave->threads_per_core[i]) in ne_start_enclave_ioctl()
1106 if (!cpumask_test_cpu(cpu, ne_enclave->vcpu_ids)) { in ne_start_enclave_ioctl()
1115 enclave_start_req.slot_uid = ne_enclave->slot_uid; in ne_start_enclave_ioctl()
1127 ne_enclave->state = NE_STATE_RUNNING; in ne_start_enclave_ioctl()
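Lines 1072-1127 gate the actual launch. All four preconditions are visible in the listing: at least one memory region, the minimum total memory, at least one vCPU, and whole-core donation. A sketch; the request layout, the error codes and the CID handed back in the reply are assumed from the NE uapi and PCI interface:

static int ne_start_enclave_ioctl(struct ne_enclave *ne_enclave,
                                  struct ne_enclave_start_info *enclave_start_info)
{
        struct ne_pci_dev_cmd_reply cmd_reply = {};
        struct enclave_start_req enclave_start_req = {};
        unsigned int cpu = 0;
        unsigned int i = 0;
        int rc = -EINVAL;

        if (!ne_enclave->nr_mem_regions)
                return -NE_ERR_NO_MEM_REGIONS_ADDED;

        if (ne_enclave->mem_size < NE_MIN_ENCLAVE_MEM_SIZE)
                return -NE_ERR_ENCLAVE_MEM_MIN_SIZE;

        if (!ne_enclave->nr_vcpus)
                return -NE_ERR_NO_VCPUS_ADDED;

        /* Cores are donated whole: every sibling must be a vCPU (1104-1106). */
        for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++)
                for_each_cpu(cpu, ne_enclave->threads_per_core[i])
                        if (!cpumask_test_cpu(cpu, ne_enclave->vcpu_ids))
                                return -NE_ERR_FULL_CORES_NOT_USED;

        enclave_start_req.enclave_cid = enclave_start_info->enclave_cid;
        enclave_start_req.flags = enclave_start_info->flags;
        enclave_start_req.slot_uid = ne_enclave->slot_uid;

        rc = ne_do_request(ne_enclave->pdev, ENCLAVE_START, &enclave_start_req,
                           sizeof(enclave_start_req), &cmd_reply, sizeof(cmd_reply));
        if (rc < 0)
                return rc;

        ne_enclave->state = NE_STATE_RUNNING;
        enclave_start_info->enclave_cid = cmd_reply.enclave_cid;

        return 0;
}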
1147 struct ne_enclave *ne_enclave = file->private_data; in ne_enclave_ioctl() local
1157 mutex_lock(&ne_enclave->enclave_info_mutex); in ne_enclave_ioctl()
1159 if (ne_enclave->state != NE_STATE_INIT) { in ne_enclave_ioctl()
1163 mutex_unlock(&ne_enclave->enclave_info_mutex); in ne_enclave_ioctl()
1168 if (vcpu_id >= (ne_enclave->nr_parent_vm_cores * in ne_enclave_ioctl()
1169 ne_enclave->nr_threads_per_core)) { in ne_enclave_ioctl()
1173 mutex_unlock(&ne_enclave->enclave_info_mutex); in ne_enclave_ioctl()
1180 rc = ne_get_cpu_from_cpu_pool(ne_enclave, &vcpu_id); in ne_enclave_ioctl()
1186 mutex_unlock(&ne_enclave->enclave_info_mutex); in ne_enclave_ioctl()
1192 rc = ne_check_cpu_in_cpu_pool(ne_enclave, vcpu_id); in ne_enclave_ioctl()
1198 mutex_unlock(&ne_enclave->enclave_info_mutex); in ne_enclave_ioctl()
1204 rc = ne_add_vcpu_ioctl(ne_enclave, vcpu_id); in ne_enclave_ioctl()
1206 mutex_unlock(&ne_enclave->enclave_info_mutex); in ne_enclave_ioctl()
1211 mutex_unlock(&ne_enclave->enclave_info_mutex); in ne_enclave_ioctl()
1225 mutex_lock(&ne_enclave->enclave_info_mutex); in ne_enclave_ioctl()
1227 if (ne_enclave->state != NE_STATE_INIT) { in ne_enclave_ioctl()
1231 mutex_unlock(&ne_enclave->enclave_info_mutex); in ne_enclave_ioctl()
1236 mutex_unlock(&ne_enclave->enclave_info_mutex); in ne_enclave_ioctl()
1269 mutex_lock(&ne_enclave->enclave_info_mutex); in ne_enclave_ioctl()
1271 if (ne_enclave->state != NE_STATE_INIT) { in ne_enclave_ioctl()
1275 mutex_unlock(&ne_enclave->enclave_info_mutex); in ne_enclave_ioctl()
1280 rc = ne_set_user_memory_region_ioctl(ne_enclave, mem_region); in ne_enclave_ioctl()
1282 mutex_unlock(&ne_enclave->enclave_info_mutex); in ne_enclave_ioctl()
1287 mutex_unlock(&ne_enclave->enclave_info_mutex); in ne_enclave_ioctl()
1349 mutex_lock(&ne_enclave->enclave_info_mutex); in ne_enclave_ioctl()
1351 if (ne_enclave->state != NE_STATE_INIT) { in ne_enclave_ioctl()
1355 mutex_unlock(&ne_enclave->enclave_info_mutex); in ne_enclave_ioctl()
1360 rc = ne_start_enclave_ioctl(ne_enclave, &enclave_start_info); in ne_enclave_ioctl()
1362 mutex_unlock(&ne_enclave->enclave_info_mutex); in ne_enclave_ioctl()
1367 mutex_unlock(&ne_enclave->enclave_info_mutex); in ne_enclave_ioctl()
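Every arm of ne_enclave_ioctl() repeats the discipline visible across lines 1157-1367: take enclave_info_mutex, require NE_STATE_INIT, do the work, unlock. The NE_SET_USER_MEMORY_REGION arm (1269-1287) and the NE_START_ENCLAVE arm (1349-1367) follow the identical shape, so a sketch of the NE_ADD_VCPU arm stands in for all three; the copy_from_user/copy_to_user steps and the NE_ERR_INVALID_VCPU code are assumed:

case NE_ADD_VCPU: {
        u32 vcpu_id = 0;

        if (copy_from_user(&vcpu_id, (void __user *)arg, sizeof(vcpu_id)))
                return -EFAULT;

        mutex_lock(&ne_enclave->enclave_info_mutex);

        /* vCPUs can only be added before the enclave starts (line 1159). */
        if (ne_enclave->state != NE_STATE_INIT) {
                mutex_unlock(&ne_enclave->enclave_info_mutex);
                return -NE_ERR_NOT_IN_INIT_STATE;
        }

        /* Upper bound from lines 1168-1169: cores times threads per core. */
        if (vcpu_id >= (ne_enclave->nr_parent_vm_cores *
                        ne_enclave->nr_threads_per_core)) {
                mutex_unlock(&ne_enclave->enclave_info_mutex);
                return -NE_ERR_INVALID_VCPU;
        }

        if (!vcpu_id)        /* 0 means "pick a CPU for me" (line 1180). */
                rc = ne_get_cpu_from_cpu_pool(ne_enclave, &vcpu_id);
        else                 /* Validate the caller's choice (line 1192). */
                rc = ne_check_cpu_in_cpu_pool(ne_enclave, vcpu_id);

        if (!rc)
                rc = ne_add_vcpu_ioctl(ne_enclave, vcpu_id);

        mutex_unlock(&ne_enclave->enclave_info_mutex);

        if (rc < 0)
                return rc;

        /* Report back which CPU was actually donated. */
        if (copy_to_user((void __user *)arg, &vcpu_id, sizeof(vcpu_id)))
                return -EFAULT;

        return 0;
}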
1390 static void ne_enclave_remove_all_mem_region_entries(struct ne_enclave *ne_enclave) in ne_enclave_remove_all_mem_region_entries() argument
1397 &ne_enclave->mem_regions_list, in ne_enclave_remove_all_mem_region_entries()
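Lines 1390-1397 show only the head of the iteration over mem_regions_list. A sketch of the cleanup it implies; releasing the pinned pages with put_page() and the exact ne_mem_region fields are assumptions:

static void ne_enclave_remove_all_mem_region_entries(struct ne_enclave *ne_enclave)
{
        unsigned long i = 0;
        struct ne_mem_region *ne_mem_region = NULL;
        struct ne_mem_region *ne_mem_region_tmp = NULL;

        list_for_each_entry_safe(ne_mem_region, ne_mem_region_tmp,
                                 &ne_enclave->mem_regions_list,
                                 mem_region_list_entry) {
                list_del(&ne_mem_region->mem_region_list_entry);

                /* Release the pages pinned at attach time (assumed). */
                for (i = 0; i < ne_mem_region->nr_pages; i++)
                        put_page(ne_mem_region->pages[i]);

                kfree(ne_mem_region->pages);
                kfree(ne_mem_region);
        }
}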
1417 static void ne_enclave_remove_all_vcpu_id_entries(struct ne_enclave *ne_enclave) in ne_enclave_remove_all_vcpu_id_entries() argument
1424 for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++) { in ne_enclave_remove_all_vcpu_id_entries()
1425 for_each_cpu(cpu, ne_enclave->threads_per_core[i]) in ne_enclave_remove_all_vcpu_id_entries()
1429 free_cpumask_var(ne_enclave->threads_per_core[i]); in ne_enclave_remove_all_vcpu_id_entries()
1434 kfree(ne_enclave->threads_per_core); in ne_enclave_remove_all_vcpu_id_entries()
1436 free_cpumask_var(ne_enclave->vcpu_ids); in ne_enclave_remove_all_vcpu_id_entries()
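Lines 1417-1436 free the CPU bookkeeping. The per-core loop (1424-1425) and the free calls (1429-1436) are from the listing; returning each donated CPU to the shared pool under ne_cpu_pool.mutex is an assumption:

static void ne_enclave_remove_all_vcpu_id_entries(struct ne_enclave *ne_enclave)
{
        unsigned int cpu = 0;
        unsigned int i = 0;

        mutex_lock(&ne_cpu_pool.mutex);

        for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++) {
                for_each_cpu(cpu, ne_enclave->threads_per_core[i])
                        /* Give the CPU back to the NE CPU pool (assumed). */
                        cpumask_set_cpu(cpu, ne_cpu_pool.avail_threads_per_core[i]);

                free_cpumask_var(ne_enclave->threads_per_core[i]);
        }

        mutex_unlock(&ne_cpu_pool.mutex);

        kfree(ne_enclave->threads_per_core);
        free_cpumask_var(ne_enclave->vcpu_ids);
}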
1449 static void ne_pci_dev_remove_enclave_entry(struct ne_enclave *ne_enclave, in ne_pci_dev_remove_enclave_entry() argument
1452 struct ne_enclave *ne_enclave_entry = NULL; in ne_pci_dev_remove_enclave_entry()
1453 struct ne_enclave *ne_enclave_entry_tmp = NULL; in ne_pci_dev_remove_enclave_entry()
1457 if (ne_enclave_entry->slot_uid == ne_enclave->slot_uid) { in ne_pci_dev_remove_enclave_entry()
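The entry/tmp pointer pair at 1452-1453 plus the slot_uid match at 1457 imply the standard safe-removal idiom; a sketch, with only the list_del() and break assumed:

static void ne_pci_dev_remove_enclave_entry(struct ne_enclave *ne_enclave,
                                            struct ne_pci_dev *ne_pci_dev)
{
        struct ne_enclave *ne_enclave_entry = NULL;
        struct ne_enclave *ne_enclave_entry_tmp = NULL;

        list_for_each_entry_safe(ne_enclave_entry, ne_enclave_entry_tmp,
                                 &ne_pci_dev->enclaves_list, enclave_list_entry) {
                if (ne_enclave_entry->slot_uid == ne_enclave->slot_uid) {
                        list_del(&ne_enclave_entry->enclave_list_entry);
                        break;
                }
        }
}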
1479 struct ne_enclave *ne_enclave = file->private_data; in ne_enclave_release() local
1485 if (!ne_enclave) in ne_enclave_release()
1492 if (!ne_enclave->slot_uid) in ne_enclave_release()
1500 mutex_lock(&ne_enclave->enclave_info_mutex); in ne_enclave_release()
1502 if (ne_enclave->state != NE_STATE_INIT && ne_enclave->state != NE_STATE_STOPPED) { in ne_enclave_release()
1503 enclave_stop_request.slot_uid = ne_enclave->slot_uid; in ne_enclave_release()
1518 slot_free_req.slot_uid = ne_enclave->slot_uid; in ne_enclave_release()
1530 ne_pci_dev_remove_enclave_entry(ne_enclave, ne_pci_dev); in ne_enclave_release()
1531 ne_enclave_remove_all_mem_region_entries(ne_enclave); in ne_enclave_release()
1532 ne_enclave_remove_all_vcpu_id_entries(ne_enclave); in ne_enclave_release()
1534 mutex_unlock(&ne_enclave->enclave_info_mutex); in ne_enclave_release()
1537 kfree(ne_enclave); in ne_enclave_release()
1542 mutex_unlock(&ne_enclave->enclave_info_mutex); in ne_enclave_release()
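Lines 1479-1542 show the teardown path on the last fd close: stop a running enclave, free its slot, then unwind the bookkeeping with the three helpers above. A condensed sketch; the request structures, the pdev/ne_pci_dev lookups and the locking of the PCI device's enclave list are assumed:

static int ne_enclave_release(struct inode *inode, struct file *file)
{
        struct ne_enclave *ne_enclave = file->private_data;
        /* Declarations of ne_pci_dev, pdev, the stop/free requests and
         * cmd_reply are omitted from this sketch.
         */
        int rc = 0;

        if (!ne_enclave)
                return 0;

        /* Slot allocation failed at create time; nothing to free (1492). */
        if (!ne_enclave->slot_uid)
                return 0;

        mutex_lock(&ne_enclave->enclave_info_mutex);

        /* A running enclave must be stopped before its slot is freed. */
        if (ne_enclave->state != NE_STATE_INIT && ne_enclave->state != NE_STATE_STOPPED) {
                enclave_stop_request.slot_uid = ne_enclave->slot_uid;
                rc = ne_do_request(pdev, ENCLAVE_STOP, &enclave_stop_request,
                                   sizeof(enclave_stop_request),
                                   &cmd_reply, sizeof(cmd_reply));
                if (rc < 0)
                        goto unlock;
        }

        slot_free_req.slot_uid = ne_enclave->slot_uid;
        rc = ne_do_request(pdev, SLOT_FREE, &slot_free_req, sizeof(slot_free_req),
                           &cmd_reply, sizeof(cmd_reply));
        if (rc < 0)
                goto unlock;

        /* Unwind in the order of lines 1530-1537. */
        ne_pci_dev_remove_enclave_entry(ne_enclave, ne_pci_dev);
        ne_enclave_remove_all_mem_region_entries(ne_enclave);
        ne_enclave_remove_all_vcpu_id_entries(ne_enclave);

        mutex_unlock(&ne_enclave->enclave_info_mutex);
        kfree(ne_enclave);

        return 0;

unlock:
        mutex_unlock(&ne_enclave->enclave_info_mutex);
        return rc;
}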
1560 struct ne_enclave *ne_enclave = file->private_data; in ne_enclave_poll() local
1562 poll_wait(file, &ne_enclave->eventq, wait); in ne_enclave_poll()
1564 if (ne_enclave->has_event) in ne_enclave_poll()
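The poll handler can be reconstructed almost completely from lines 1560-1564: it parks the caller on the enclave's event queue and reports a hangup once the device has signaled that the enclave exited. Only the EPOLLHUP mask value is assumed:

static __poll_t ne_enclave_poll(struct file *file, poll_table *wait)
{
        __poll_t mask = 0;
        struct ne_enclave *ne_enclave = file->private_data;

        poll_wait(file, &ne_enclave->eventq, wait);

        /* has_event is set from the out-of-band event path (assumed). */
        if (ne_enclave->has_event)
                mask |= EPOLLHUP;

        return mask;
}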
1598 struct ne_enclave *ne_enclave = NULL; in ne_create_vm_ioctl() local
1620 ne_enclave = kzalloc(sizeof(*ne_enclave), GFP_KERNEL); in ne_create_vm_ioctl()
1621 if (!ne_enclave) in ne_create_vm_ioctl()
1626 ne_enclave->nr_parent_vm_cores = ne_cpu_pool.nr_parent_vm_cores; in ne_create_vm_ioctl()
1627 ne_enclave->nr_threads_per_core = ne_cpu_pool.nr_threads_per_core; in ne_create_vm_ioctl()
1628 ne_enclave->numa_node = ne_cpu_pool.numa_node; in ne_create_vm_ioctl()
1632 ne_enclave->threads_per_core = kcalloc(ne_enclave->nr_parent_vm_cores, in ne_create_vm_ioctl()
1633 sizeof(*ne_enclave->threads_per_core), in ne_create_vm_ioctl()
1635 if (!ne_enclave->threads_per_core) { in ne_create_vm_ioctl()
1641 for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++) in ne_create_vm_ioctl()
1642 if (!zalloc_cpumask_var(&ne_enclave->threads_per_core[i], GFP_KERNEL)) { in ne_create_vm_ioctl()
1648 if (!zalloc_cpumask_var(&ne_enclave->vcpu_ids, GFP_KERNEL)) { in ne_create_vm_ioctl()
1664 enclave_file = anon_inode_getfile("ne-vm", &ne_enclave_fops, ne_enclave, O_RDWR); in ne_create_vm_ioctl()
1684 init_waitqueue_head(&ne_enclave->eventq); in ne_create_vm_ioctl()
1685 ne_enclave->has_event = false; in ne_create_vm_ioctl()
1686 mutex_init(&ne_enclave->enclave_info_mutex); in ne_create_vm_ioctl()
1687 ne_enclave->max_mem_regions = cmd_reply.mem_regions; in ne_create_vm_ioctl()
1688 INIT_LIST_HEAD(&ne_enclave->mem_regions_list); in ne_create_vm_ioctl()
1689 ne_enclave->mm = current->mm; in ne_create_vm_ioctl()
1690 ne_enclave->slot_uid = cmd_reply.slot_uid; in ne_create_vm_ioctl()
1691 ne_enclave->state = NE_STATE_INIT; in ne_create_vm_ioctl()
1693 list_add(&ne_enclave->enclave_list_entry, &ne_pci_dev->enclaves_list); in ne_create_vm_ioctl()
1695 if (copy_to_user(slot_uid, &ne_enclave->slot_uid, sizeof(ne_enclave->slot_uid))) { in ne_create_vm_ioctl()
1717 free_cpumask_var(ne_enclave->vcpu_ids); in ne_create_vm_ioctl()
1718 for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++) in ne_create_vm_ioctl()
1719 free_cpumask_var(ne_enclave->threads_per_core[i]); in ne_create_vm_ioctl()
1720 kfree(ne_enclave->threads_per_core); in ne_create_vm_ioctl()
1722 kfree(ne_enclave); in ne_create_vm_ioctl()
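Lines 1717-1722 are the error unwind of ne_create_vm_ioctl(), releasing the vcpu_ids mask and the per-core cpumasks in reverse allocation order before freeing the enclave itself. To close the loop on what these handlers implement, here is a hedged userspace sketch of the lifecycle they serve; launch_enclave() and its parameters are hypothetical, the ioctl names come from the NE uapi header, and the image-load step (NE_GET_IMAGE_LOAD_INFO plus copying the enclave image) and all error handling are omitted:

#include <fcntl.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/nitro_enclaves.h>

/* Hypothetical helper: hugepage_addr must be an mmap'ed hugetlbfs buffer
 * large enough to satisfy the driver's NE_MIN_ENCLAVE_MEM_SIZE total.
 */
static int launch_enclave(void *hugepage_addr, size_t hugepage_size)
{
        int ne_dev_fd = open("/dev/nitro_enclaves", O_RDWR | O_CLOEXEC);
        uint64_t slot_uid = 0;

        /* NE_CREATE_VM returns the enclave fd and fills in the slot uid. */
        int enclave_fd = ioctl(ne_dev_fd, NE_CREATE_VM, &slot_uid);

        /* vcpu_id == 0 asks the driver to pick a CPU from its pool. */
        uint32_t vcpu_id = 0;
        ioctl(enclave_fd, NE_ADD_VCPU, &vcpu_id);

        struct ne_user_memory_region mem_region = {
                .flags = NE_DEFAULT_MEMORY_REGION,
                .memory_size = hugepage_size,
                .userspace_addr = (uintptr_t)hugepage_addr,
        };
        ioctl(enclave_fd, NE_SET_USER_MEMORY_REGION, &mem_region);

        struct ne_enclave_start_info start_info = {
                .flags = 0,        /* 0 assumed to mean production mode. */
        };
        ioctl(enclave_fd, NE_START_ENCLAVE, &start_info);

        return enclave_fd;
}

Note that on an SMT parent a single vCPU leaves its sibling thread unused, so NE_START_ENCLAVE would fail the full-core check seen at lines 1104-1106; a second NE_ADD_VCPU with vcpu_id 0 picks up the sibling.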