/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !(v->arch.msr & MSR_WE) || !!(v->arch.pending_exceptions);
}


int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
		/* XXX Deliver Program interrupt to guest. */
		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
		       kvmppc_get_last_inst(vcpu));
		r = RESUME_HOST;
		break;
	default:
		BUG();
	}

	return r;
}

int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm;

	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	if (!kvm)
		return ERR_PTR(-ENOMEM);

	return kvm;
}

static void kvmppc_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvmppc_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	cleanup_srcu_struct(&kvm->srcu);
	kfree(kvm);
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_PPC_OSI:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	default:
		r = 0;
		break;
	}
	return r;

}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
	return;
}


void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu))
		kvmppc_create_vcpu_debugfs(vcpu, id);
	return vcpu;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);
	tasklet_kill(&vcpu->arch.tasklet);

	kvmppc_remove_vcpu_debugfs(vcpu);
	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

static void kvmppc_decrementer_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvmppc_core_queue_dec(vcpu);

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;

	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
				     struct kvm_run *run)
{
	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}

static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
				      struct kvm_run *run)
{
	u64 gpr;

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (vcpu->arch.mmio_is_bigendian) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		/* Convert BE data from userland back to LE. */
		switch (run->mmio.len) {
		case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
		case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_REG_EXT_MASK) {
	case KVM_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_REG_FPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
		break;
	case KVM_REG_FQPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes, int is_bigendian)
{
	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_is_bigendian = is_bigendian;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = 0;

	return EMULATE_DO_MMIO;
}

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes, int is_bigendian)
{
	int r;

	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
	vcpu->arch.mmio_sign_extend = 1;

	return r;
}

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_bigendian)
{
	void *data = run->mmio.data;

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (is_bigendian) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8  *)data = val; break;
		}
	} else {
		/* Store LE value into 'data'. */
		switch (bytes) {
		case 4: st_le32(data, val); break;
		case 2: st_le16(data, val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	return EMULATE_DO_MMIO;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.dcr_needed) {
		if (!vcpu->arch.dcr_is_write)
			kvmppc_complete_dcr_load(vcpu, run);
		vcpu->arch.dcr_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	}

	kvmppc_core_deliver_interrupts(vcpu);

	local_irq_disable();
	kvm_guest_enter();
	r = __kvmppc_vcpu_run(run, vcpu);
	kvm_guest_exit();
	local_irq_enable();

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);

	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET)
		kvmppc_core_dequeue_external(vcpu, irq);
	else
		kvmppc_core_queue_external(vcpu, irq);

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -EINVAL;
	}

out:
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	long r;

	switch (ioctl) {
	default:
		r = -ENOTTY;
	}

	return r;
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}