/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !(v->arch.msr & MSR_WE) || !!(v->arch.pending_exceptions);
}

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
		/* XXX Deliver Program interrupt to guest. */
		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
		       kvmppc_get_last_inst(vcpu));
		r = RESUME_HOST;
		break;
	default:
		BUG();
	}

	return r;
}

int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm;

	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	if (!kvm)
		return ERR_PTR(-ENOMEM);

	return kvm;
}

static void kvmppc_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvmppc_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	cleanup_srcu_struct(&kvm->srcu);
	kfree(kvm);
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_PPC_OSI:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   struct kvm_memory_slot old,
                                   int user_alloc)
{
	return;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu))
		kvmppc_create_vcpu_debugfs(vcpu, id);
	return vcpu;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);
	tasklet_kill(&vcpu->arch.tasklet);

	kvmppc_remove_vcpu_debugfs(vcpu);
	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

static void kvmppc_decrementer_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvmppc_core_queue_dec(vcpu);

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;

	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}

static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (vcpu->arch.mmio_is_bigendian) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		/* Convert BE data from userland back to LE. */
		switch (run->mmio.len) {
		case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
		case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_REG_EXT_MASK) {
	case KVM_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_REG_FPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
		break;
	case KVM_REG_FQPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes, int is_bigendian)
{
	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_is_bigendian = is_bigendian;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = 0;

	return EMULATE_DO_MMIO;
}

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int rt, unsigned int bytes, int is_bigendian)
{
	int r;

	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
	vcpu->arch.mmio_sign_extend = 1;

	return r;
}

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u64 val, unsigned int bytes, int is_bigendian)
{
	void *data = run->mmio.data;

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (is_bigendian) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		/* Store LE value into 'data'. */
		switch (bytes) {
		case 4: st_le32(data, val); break;
		case 2: st_le16(data, val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	return EMULATE_DO_MMIO;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.dcr_needed) {
		if (!vcpu->arch.dcr_is_write)
			kvmppc_complete_dcr_load(vcpu, run);
		vcpu->arch.dcr_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	}

	kvmppc_core_deliver_interrupts(vcpu);

	local_irq_disable();
	kvm_guest_enter();
	r = __kvmppc_vcpu_run(run, vcpu);
	kvm_guest_exit();
	local_irq_enable();

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET)
		kvmppc_core_dequeue_external(vcpu, irq);
	else
		kvmppc_core_queue_external(vcpu, irq);

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -EINVAL;
	}

out:
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
	long r;

	switch (ioctl) {
	default:
		r = -ENOTTY;
	}

	return r;
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}