// SPDX-License-Identifier: GPL-2.0
/*
 * ACRN Hypervisor Service Module (HSM)
 *
 * Copyright (C) 2020 Intel Corporation. All rights reserved.
 *
 * Authors:
 *	Fengwei Yin <fengwei.yin@intel.com>
 *	Yakui Zhao <yakui.zhao@intel.com>
 */

#include <linux/cpu.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <asm/acrn.h>
#include <asm/hypervisor.h>

#include "acrn_drv.h"

/*
 * When /dev/acrn_hsm is opened, a 'struct acrn_vm' object is created to
 * represent a VM instance and continues to be associated with the opened file
 * descriptor. All ioctl operations on this file descriptor will be targeted to
 * the VM instance. Release of this file descriptor will destroy the object.
 */
static int acrn_dev_open(struct inode *inode, struct file *filp)
{
	struct acrn_vm *vm;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	vm->vmid = ACRN_INVALID_VMID;
	filp->private_data = vm;
	return 0;
}

static int pmcmd_ioctl(u64 cmd, void __user *uptr)
{
	struct acrn_pstate_data *px_data;
	struct acrn_cstate_data *cx_data;
	u64 *pm_info;
	int ret = 0;

	switch (cmd & PMCMD_TYPE_MASK) {
	case ACRN_PMCMD_GET_PX_CNT:
	case ACRN_PMCMD_GET_CX_CNT:
		pm_info = kmalloc(sizeof(u64), GFP_KERNEL);
		if (!pm_info)
			return -ENOMEM;

		ret = hcall_get_cpu_state(cmd, virt_to_phys(pm_info));
		if (ret < 0) {
			kfree(pm_info);
			break;
		}

		if (copy_to_user(uptr, pm_info, sizeof(u64)))
			ret = -EFAULT;
		kfree(pm_info);
		break;
	case ACRN_PMCMD_GET_PX_DATA:
		px_data = kmalloc(sizeof(*px_data), GFP_KERNEL);
		if (!px_data)
			return -ENOMEM;

		ret = hcall_get_cpu_state(cmd, virt_to_phys(px_data));
		if (ret < 0) {
			kfree(px_data);
			break;
		}

		if (copy_to_user(uptr, px_data, sizeof(*px_data)))
			ret = -EFAULT;
		kfree(px_data);
		break;
	case ACRN_PMCMD_GET_CX_DATA:
		cx_data = kmalloc(sizeof(*cx_data), GFP_KERNEL);
		if (!cx_data)
			return -ENOMEM;

		ret = hcall_get_cpu_state(cmd, virt_to_phys(cx_data));
		if (ret < 0) {
			kfree(cx_data);
			break;
		}

		if (copy_to_user(uptr, cx_data, sizeof(*cx_data)))
			ret = -EFAULT;
		kfree(cx_data);
		break;
	default:
		break;
	}

	return ret;
}
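
/*
 * Illustrative user-space flow (a sketch under assumptions, not part of this
 * driver): a VM manager would open /dev/acrn_hsm, create and start a VM
 * through the ioctls handled below, and let the final close() destroy the VM
 * object via acrn_dev_release(). The field layout of struct acrn_vm_creation
 * comes from the ACRN UAPI header and is not repeated here.
 *
 *	int fd = open("/dev/acrn_hsm", O_RDWR);
 *	struct acrn_vm_creation create = { ... };
 *
 *	if (ioctl(fd, ACRN_IOCTL_CREATE_VM, &create) == 0)
 *		ioctl(fd, ACRN_IOCTL_START_VM);
 *	...
 *	close(fd);	// releasing the fd tears the VM down
 */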

/*
 * HSM relies on hypercall layer of the ACRN hypervisor to do the
 * sanity check against the input parameters.
 */
static long acrn_dev_ioctl(struct file *filp, unsigned int cmd,
			   unsigned long ioctl_param)
{
	struct acrn_vm *vm = filp->private_data;
	struct acrn_vm_creation *vm_param;
	struct acrn_vcpu_regs *cpu_regs;
	struct acrn_ioreq_notify notify;
	struct acrn_ptdev_irq *irq_info;
	struct acrn_ioeventfd ioeventfd;
	struct acrn_vm_memmap memmap;
	struct acrn_msi_entry *msi;
	struct acrn_pcidev *pcidev;
	struct acrn_irqfd irqfd;
	struct page *page;
	u64 cstate_cmd;
	int i, ret = 0;

	if (vm->vmid == ACRN_INVALID_VMID && cmd != ACRN_IOCTL_CREATE_VM) {
		dev_dbg(acrn_dev.this_device,
			"ioctl 0x%x: Invalid VM state!\n", cmd);
		return -EINVAL;
	}

	switch (cmd) {
	case ACRN_IOCTL_CREATE_VM:
		vm_param = memdup_user((void __user *)ioctl_param,
				       sizeof(struct acrn_vm_creation));
		if (IS_ERR(vm_param))
			return PTR_ERR(vm_param);

		if ((vm_param->reserved0 | vm_param->reserved1) != 0) {
			kfree(vm_param);
			return -EINVAL;
		}

		vm = acrn_vm_create(vm, vm_param);
		if (!vm) {
			ret = -EINVAL;
			kfree(vm_param);
			break;
		}

		if (copy_to_user((void __user *)ioctl_param, vm_param,
				 sizeof(struct acrn_vm_creation))) {
			acrn_vm_destroy(vm);
			ret = -EFAULT;
		}

		kfree(vm_param);
		break;
	case ACRN_IOCTL_START_VM:
		ret = hcall_start_vm(vm->vmid);
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to start VM %u!\n", vm->vmid);
		break;
	case ACRN_IOCTL_PAUSE_VM:
		ret = hcall_pause_vm(vm->vmid);
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to pause VM %u!\n", vm->vmid);
		break;
	case ACRN_IOCTL_RESET_VM:
		ret = hcall_reset_vm(vm->vmid);
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to restart VM %u!\n", vm->vmid);
		break;
	case ACRN_IOCTL_DESTROY_VM:
		ret = acrn_vm_destroy(vm);
		break;
	case ACRN_IOCTL_SET_VCPU_REGS:
		cpu_regs = memdup_user((void __user *)ioctl_param,
				       sizeof(struct acrn_vcpu_regs));
		if (IS_ERR(cpu_regs))
			return PTR_ERR(cpu_regs);

		for (i = 0; i < ARRAY_SIZE(cpu_regs->reserved); i++)
			if (cpu_regs->reserved[i]) {
				kfree(cpu_regs);
				return -EINVAL;
			}

		for (i = 0; i < ARRAY_SIZE(cpu_regs->vcpu_regs.reserved_32); i++)
			if (cpu_regs->vcpu_regs.reserved_32[i]) {
				kfree(cpu_regs);
				return -EINVAL;
			}

		for (i = 0; i < ARRAY_SIZE(cpu_regs->vcpu_regs.reserved_64); i++)
			if (cpu_regs->vcpu_regs.reserved_64[i]) {
				kfree(cpu_regs);
				return -EINVAL;
			}

		for (i = 0; i < ARRAY_SIZE(cpu_regs->vcpu_regs.gdt.reserved); i++)
			if (cpu_regs->vcpu_regs.gdt.reserved[i] |
			    cpu_regs->vcpu_regs.idt.reserved[i]) {
				kfree(cpu_regs);
				return -EINVAL;
			}

		ret = hcall_set_vcpu_regs(vm->vmid, virt_to_phys(cpu_regs));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to set regs state of VM%u!\n",
				vm->vmid);
		kfree(cpu_regs);
		break;
	case ACRN_IOCTL_SET_MEMSEG:
		if (copy_from_user(&memmap, (void __user *)ioctl_param,
				   sizeof(memmap)))
			return -EFAULT;

		ret = acrn_vm_memseg_map(vm, &memmap);
		break;
	case ACRN_IOCTL_UNSET_MEMSEG:
		if (copy_from_user(&memmap, (void __user *)ioctl_param,
				   sizeof(memmap)))
			return -EFAULT;

		ret = acrn_vm_memseg_unmap(vm, &memmap);
		break;
	case ACRN_IOCTL_ASSIGN_PCIDEV:
		pcidev = memdup_user((void __user *)ioctl_param,
				     sizeof(struct acrn_pcidev));
		if (IS_ERR(pcidev))
			return PTR_ERR(pcidev);

		ret = hcall_assign_pcidev(vm->vmid, virt_to_phys(pcidev));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to assign pci device!\n");
		kfree(pcidev);
		break;
	case ACRN_IOCTL_DEASSIGN_PCIDEV:
		pcidev = memdup_user((void __user *)ioctl_param,
				     sizeof(struct acrn_pcidev));
		if (IS_ERR(pcidev))
			return PTR_ERR(pcidev);

		ret = hcall_deassign_pcidev(vm->vmid, virt_to_phys(pcidev));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to deassign pci device!\n");
		kfree(pcidev);
		break;
	case ACRN_IOCTL_SET_PTDEV_INTR:
		irq_info = memdup_user((void __user *)ioctl_param,
				       sizeof(struct acrn_ptdev_irq));
		if (IS_ERR(irq_info))
			return PTR_ERR(irq_info);

		ret = hcall_set_ptdev_intr(vm->vmid, virt_to_phys(irq_info));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to configure intr for ptdev!\n");
		kfree(irq_info);
		break;
	case ACRN_IOCTL_RESET_PTDEV_INTR:
		irq_info = memdup_user((void __user *)ioctl_param,
				       sizeof(struct acrn_ptdev_irq));
		if (IS_ERR(irq_info))
			return PTR_ERR(irq_info);

		ret = hcall_reset_ptdev_intr(vm->vmid, virt_to_phys(irq_info));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to reset intr for ptdev!\n");
		kfree(irq_info);
		break;
	case ACRN_IOCTL_SET_IRQLINE:
		ret = hcall_set_irqline(vm->vmid, ioctl_param);
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to set interrupt line!\n");
		break;
	case ACRN_IOCTL_INJECT_MSI:
		msi = memdup_user((void __user *)ioctl_param,
				  sizeof(struct acrn_msi_entry));
		if (IS_ERR(msi))
			return PTR_ERR(msi);

		ret = hcall_inject_msi(vm->vmid, virt_to_phys(msi));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to inject MSI!\n");
		kfree(msi);
		break;
	case ACRN_IOCTL_VM_INTR_MONITOR:
		ret = pin_user_pages_fast(ioctl_param, 1,
					  FOLL_WRITE | FOLL_LONGTERM, &page);
		if (unlikely(ret != 1)) {
			dev_dbg(acrn_dev.this_device,
				"Failed to pin intr hdr buffer!\n");
			return -EFAULT;
		}

		ret = hcall_vm_intr_monitor(vm->vmid, page_to_phys(page));
		if (ret < 0) {
			unpin_user_page(page);
			dev_dbg(acrn_dev.this_device,
				"Failed to monitor intr data!\n");
			return ret;
		}
		if (vm->monitor_page)
			unpin_user_page(vm->monitor_page);
		vm->monitor_page = page;
		break;
	case ACRN_IOCTL_CREATE_IOREQ_CLIENT:
		if (vm->default_client)
			return -EEXIST;
		if (!acrn_ioreq_client_create(vm, NULL, NULL, true, "acrndm"))
			ret = -EINVAL;
		break;
	case ACRN_IOCTL_DESTROY_IOREQ_CLIENT:
		if (vm->default_client)
			acrn_ioreq_client_destroy(vm->default_client);
		break;
	case ACRN_IOCTL_ATTACH_IOREQ_CLIENT:
		if (vm->default_client)
			ret = acrn_ioreq_client_wait(vm->default_client);
		else
			ret = -ENODEV;
		break;
	case ACRN_IOCTL_NOTIFY_REQUEST_FINISH:
		if (copy_from_user(&notify, (void __user *)ioctl_param,
				   sizeof(struct acrn_ioreq_notify)))
			return -EFAULT;

		if (notify.reserved != 0)
			return -EINVAL;

		ret = acrn_ioreq_request_default_complete(vm, notify.vcpu);
		break;
	case ACRN_IOCTL_CLEAR_VM_IOREQ:
		acrn_ioreq_request_clear(vm);
		break;
	case ACRN_IOCTL_PM_GET_CPU_STATE:
		if (copy_from_user(&cstate_cmd, (void __user *)ioctl_param,
				   sizeof(cstate_cmd)))
			return -EFAULT;

		ret = pmcmd_ioctl(cstate_cmd, (void __user *)ioctl_param);
		break;
	case ACRN_IOCTL_IOEVENTFD:
		if (copy_from_user(&ioeventfd, (void __user *)ioctl_param,
				   sizeof(ioeventfd)))
			return -EFAULT;

		if (ioeventfd.reserved != 0)
			return -EINVAL;

		ret = acrn_ioeventfd_config(vm, &ioeventfd);
		break;
	case ACRN_IOCTL_IRQFD:
		if (copy_from_user(&irqfd, (void __user *)ioctl_param,
				   sizeof(irqfd)))
			return -EFAULT;
		ret = acrn_irqfd_config(vm, &irqfd);
		break;
	default:
		dev_dbg(acrn_dev.this_device, "Unknown IOCTL 0x%x!\n", cmd);
		ret = -ENOTTY;
	}

	return ret;
}

static int acrn_dev_release(struct inode *inode, struct file *filp)
{
	struct acrn_vm *vm = filp->private_data;

	acrn_vm_destroy(vm);
	kfree(vm);
	return 0;
}
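
/*
 * Sketch of how the "remove_cpu" attribute below is meant to be used from
 * user space (an assumption based on the misc device name "acrn_hsm" and the
 * attribute group registered at the bottom of this file; verify the sysfs
 * path on a live system):
 *
 *	echo 3 > /sys/class/misc/acrn_hsm/remove_cpu
 *
 * This offlines the chosen Service VM CPU (if it is online) and asks the
 * hypervisor to remove it via hcall_sos_remove_cpu(); CPU0 and CPUs that are
 * not hot-pluggable are rejected with -EINVAL.
 */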

static ssize_t remove_cpu_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	u64 cpu, lapicid;
	int ret;

	if (kstrtoull(buf, 0, &cpu) < 0)
		return -EINVAL;

	if (cpu >= num_possible_cpus() || cpu == 0 || !cpu_is_hotpluggable(cpu))
		return -EINVAL;

	if (cpu_online(cpu))
		remove_cpu(cpu);

	lapicid = cpu_data(cpu).apicid;
	dev_dbg(dev, "Try to remove cpu %lld with lapicid %lld\n", cpu, lapicid);
	ret = hcall_sos_remove_cpu(lapicid);
	if (ret < 0) {
		dev_err(dev, "Failed to remove cpu %lld!\n", cpu);
		goto fail_remove;
	}

	return count;

fail_remove:
	add_cpu(cpu);
	return ret;
}
static DEVICE_ATTR_WO(remove_cpu);

static umode_t acrn_attr_visible(struct kobject *kobj, struct attribute *a, int n)
{
	if (a == &dev_attr_remove_cpu.attr)
		return IS_ENABLED(CONFIG_HOTPLUG_CPU) ? a->mode : 0;

	return a->mode;
}

static struct attribute *acrn_attrs[] = {
	&dev_attr_remove_cpu.attr,
	NULL
};

static struct attribute_group acrn_attr_group = {
	.attrs = acrn_attrs,
	.is_visible = acrn_attr_visible,
};

static const struct attribute_group *acrn_attr_groups[] = {
	&acrn_attr_group,
	NULL
};

static const struct file_operations acrn_fops = {
	.owner		= THIS_MODULE,
	.open		= acrn_dev_open,
	.release	= acrn_dev_release,
	.unlocked_ioctl	= acrn_dev_ioctl,
};

struct miscdevice acrn_dev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "acrn_hsm",
	.fops	= &acrn_fops,
	.groups	= acrn_attr_groups,
};

static int __init hsm_init(void)
{
	int ret;

	if (x86_hyper_type != X86_HYPER_ACRN)
		return -ENODEV;

	if (!(cpuid_eax(ACRN_CPUID_FEATURES) & ACRN_FEATURE_PRIVILEGED_VM))
		return -EPERM;

	ret = misc_register(&acrn_dev);
	if (ret) {
		pr_err("Create misc dev failed!\n");
		return ret;
	}

	ret = acrn_ioreq_intr_setup();
	if (ret) {
		pr_err("Setup I/O request handler failed!\n");
		misc_deregister(&acrn_dev);
		return ret;
	}
	return 0;
}

static void __exit hsm_exit(void)
{
	acrn_ioreq_intr_remove();
	misc_deregister(&acrn_dev);
}
module_init(hsm_init);
module_exit(hsm_exit);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ACRN Hypervisor Service Module (HSM)");