/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/miscdevice.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include "book3s.h"
#include "trace.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* #define EXIT_DEBUG */

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "exits",       VCPU_STAT(sum_exits) },
	{ "mmio",        VCPU_STAT(mmio_exits) },
	{ "sig",         VCPU_STAT(signal_exits) },
	{ "sysc",        VCPU_STAT(syscall_exits) },
	{ "inst_emu",    VCPU_STAT(emulated_inst_exits) },
	{ "dec",         VCPU_STAT(dec_exits) },
	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
	{ "queue_intr",  VCPU_STAT(queue_intr) },
	{ "halt_poll_success_ns",	VCPU_STAT(halt_poll_success_ns) },
	{ "halt_poll_fail_ns",		VCPU_STAT(halt_poll_fail_ns) },
	{ "halt_wait_ns",		VCPU_STAT(halt_wait_ns) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), },
	{ "halt_successful_wait",	VCPU_STAT(halt_successful_wait) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "pf_storage",  VCPU_STAT(pf_storage) },
	{ "sp_storage",  VCPU_STAT(sp_storage) },
	{ "pf_instruc",  VCPU_STAT(pf_instruc) },
	{ "sp_instruc",  VCPU_STAT(sp_instruc) },
	{ "ld",          VCPU_STAT(ld) },
	{ "ld_slow",     VCPU_STAT(ld_slow) },
	{ "st",          VCPU_STAT(st) },
	{ "st_slow",     VCPU_STAT(st_slow) },
	{ "pthru_all",       VCPU_STAT(pthru_all) },
	{ "pthru_host",      VCPU_STAT(pthru_host) },
	{ "pthru_bad_aff",   VCPU_STAT(pthru_bad_aff) },
	{ NULL }
};

void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
		ulong pc = kvmppc_get_pc(vcpu);
		if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
			kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
		vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
	}
}
EXPORT_SYMBOL_GPL(kvmppc_unfixup_split_real);

static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
	if (!is_kvmppc_hv_enabled(vcpu->kvm))
		return to_book3s(vcpu)->hior;
	return 0;
}

static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
{
	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return;
	if (pending_now)
		kvmppc_set_int_pending(vcpu, 1);
	else if (old_pending)
		kvmppc_set_int_pending(vcpu, 0);
}
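/*
 * A paravirtualized guest flags a critical section by writing its
 * current r1 into the "critical" field of the shared (magic) page;
 * while the two match and the guest runs in supervisor mode, delivery
 * of maskable interrupts is deferred.  HV guests never use this hack,
 * hence the early exit.
 */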
static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	ulong crit_raw;
	ulong crit_r1;
	bool crit;

	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return false;

	crit_raw = kvmppc_get_critical(vcpu);
	crit_r1 = kvmppc_get_gpr(vcpu, 1);

	/* Truncate crit indicators in 32 bit mode */
	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);

	return crit;
}

void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	kvmppc_unfixup_split_real(vcpu);
	kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
	kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags);
	kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
	vcpu->arch.mmu.reset_msr(vcpu);
}

static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;		break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK;	break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;		break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;		break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;		break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;		break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;		break;
	case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL;	break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;		break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;		break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;		break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;		break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;		break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;		break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;		break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX;			break;
	case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL;		break;
	default:    prio = BOOK3S_IRQPRIO_MAX;			break;
	}

	return prio;
}

void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
				   unsigned int vec)
{
	unsigned long old_pending = vcpu->arch.pending_exceptions;

	clear_bit(kvmppc_book3s_vec2irqprio(vec),
		  &vcpu->arch.pending_exceptions);

	kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
				  old_pending);
}

void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);
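/*
 * Queue an external interrupt requested by userspace's KVM_INTERRUPT
 * ioctl.  KVM_INTERRUPT_SET_LEVEL asks for level-triggered semantics:
 * the interrupt stays pending until userspace explicitly dequeues it,
 * whereas the edge-triggered variant is cleared once delivered (see
 * clear_irqprio() below).
 */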
void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL;

	kvmppc_book3s_queue_irqprio(vcpu, vec);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
}

void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
				    ulong flags)
{
	kvmppc_set_dar(vcpu, dar);
	kvmppc_set_dsisr(vcpu, flags);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage);	/* used by kvm_hv */

void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
{
	u64 msr = kvmppc_get_msr(vcpu);
	msr &= ~(SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
	msr |= flags & (SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
	kvmppc_set_msr_fast(vcpu, msr);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
}
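/*
 * Deliver the given pending exception priority to the guest, if it is
 * currently deliverable.  Only the decrementer and external interrupts
 * are maskable here: they require MSR_EE and must not hit a paravirt
 * critical section.  All other priorities are injected unconditionally
 * once selected.
 */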
static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
					 unsigned int priority)
{
	int deliver = 1;
	int vec = 0;
	bool crit = kvmppc_critical_section(vcpu);

	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_DECREMENTER;
		break;
	case BOOK3S_IRQPRIO_EXTERNAL:
	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_EXTERNAL;
		break;
	case BOOK3S_IRQPRIO_SYSTEM_RESET:
		vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
		break;
	case BOOK3S_IRQPRIO_MACHINE_CHECK:
		vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
		break;
	case BOOK3S_IRQPRIO_DATA_STORAGE:
		vec = BOOK3S_INTERRUPT_DATA_STORAGE;
		break;
	case BOOK3S_IRQPRIO_INST_STORAGE:
		vec = BOOK3S_INTERRUPT_INST_STORAGE;
		break;
	case BOOK3S_IRQPRIO_DATA_SEGMENT:
		vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_INST_SEGMENT:
		vec = BOOK3S_INTERRUPT_INST_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_ALIGNMENT:
		vec = BOOK3S_INTERRUPT_ALIGNMENT;
		break;
	case BOOK3S_IRQPRIO_PROGRAM:
		vec = BOOK3S_INTERRUPT_PROGRAM;
		break;
	case BOOK3S_IRQPRIO_VSX:
		vec = BOOK3S_INTERRUPT_VSX;
		break;
	case BOOK3S_IRQPRIO_ALTIVEC:
		vec = BOOK3S_INTERRUPT_ALTIVEC;
		break;
	case BOOK3S_IRQPRIO_FP_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
		break;
	case BOOK3S_IRQPRIO_SYSCALL:
		vec = BOOK3S_INTERRUPT_SYSCALL;
		break;
	case BOOK3S_IRQPRIO_DEBUG:
		vec = BOOK3S_INTERRUPT_TRACE;
		break;
	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
		vec = BOOK3S_INTERRUPT_PERFMON;
		break;
	case BOOK3S_IRQPRIO_FAC_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FAC_UNAVAIL;
		break;
	default:
		deliver = 0;
		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
		break;
	}

#if 0
	printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

	if (deliver)
		kvmppc_inject_interrupt(vcpu, vec, 0);

	return deliver;
}

/*
 * This function determines if an irqprio should be cleared once issued.
 */
static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
{
	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		/* DEC interrupts get cleared by mtdec */
		return false;
	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
		/* External interrupts get cleared by userspace */
		return false;
	}

	return true;
}

int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long old_pending = vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
	priority = __ffs(*pending);
	while (priority < BOOK3S_IRQPRIO_MAX) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    clear_irqprio(vcpu, priority)) {
			clear_bit(priority, &vcpu->arch.pending_exceptions);
			break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	kvmppc_update_int_pending(vcpu, *pending, old_pending);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);

kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
			    bool *writable)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
	gfn_t gfn = gpa >> PAGE_SHIFT;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	/* Magic page override */
	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
		kvm_pfn_t pfn;

		pfn = (kvm_pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
		get_page(pfn_to_page(pfn));
		if (writable)
			*writable = true;
		return pfn;
	}

	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);
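/*
 * Translate a guest effective address for an instruction or data
 * access.  With address relocation enabled (MSR_IR/MSR_DR) the guest
 * MMU implementation performs the lookup; in real mode the address is
 * passed through with full permissions and tagged with VSID_REAL.
 */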
int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
		 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
{
	bool data = (xlid == XLATE_DATA);
	bool iswrite = (xlrw == XLATE_WRITE);
	int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
	int r;

	if (relocated) {
		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
	} else {
		pte->eaddr = eaddr;
		pte->raddr = eaddr & KVM_PAM;
		pte->vpage = VSID_REAL | eaddr >> 12;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;
		r = 0;

		if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
		    !data) {
			if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
			    ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
				pte->raddr &= ~SPLIT_HACK_MASK;
		}
	}

	return r;
}

int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
			  u32 *inst)
{
	ulong pc = kvmppc_get_pc(vcpu);
	int r;

	if (type == INST_SC)
		pc -= 4;

	r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
	if (r == EMULATE_DONE)
		return r;
	else
		return EMULATE_AGAIN;
}
EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = kvmppc_get_pc(vcpu);
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = kvmppc_get_ctr(vcpu);
	regs->lr = kvmppc_get_lr(vcpu);
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = kvmppc_get_msr(vcpu);
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = kvmppc_get_sprg0(vcpu);
	regs->sprg1 = kvmppc_get_sprg1(vcpu);
	regs->sprg2 = kvmppc_get_sprg2(vcpu);
	regs->sprg3 = kvmppc_get_sprg3(vcpu);
	regs->sprg4 = kvmppc_get_sprg4(vcpu);
	regs->sprg5 = kvmppc_get_sprg5(vcpu);
	regs->sprg6 = kvmppc_get_sprg6(vcpu);
	regs->sprg7 = kvmppc_get_sprg7(vcpu);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	kvmppc_set_pc(vcpu, regs->pc);
	kvmppc_set_cr(vcpu, regs->cr);
	kvmppc_set_ctr(vcpu, regs->ctr);
	kvmppc_set_lr(vcpu, regs->lr);
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_sprg0(vcpu, regs->sprg0);
	kvmppc_set_sprg1(vcpu, regs->sprg1);
	kvmppc_set_sprg2(vcpu, regs->sprg2);
	kvmppc_set_sprg3(vcpu, regs->sprg3);
	kvmppc_set_sprg4(vcpu, regs->sprg4);
	kvmppc_set_sprg5(vcpu, regs->sprg5);
	kvmppc_set_sprg6(vcpu, regs->sprg6);
	kvmppc_set_sprg7(vcpu, regs->sprg7);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}
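/*
 * ONE_REG accessors.  The active backend (HV or PR) gets first pick
 * via kvm_ops; if it returns -EINVAL the register is handled by the
 * generic Book3S fallbacks below.
 */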
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			*val = get_reg_val(id, kvmppc_get_dar(vcpu));
			break;
		case KVM_REG_PPC_DSISR:
			*val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			*val = get_reg_val(id, VCPU_FPR(vcpu, i));
			break;
		case KVM_REG_PPC_FPSCR:
			*val = get_reg_val(id, vcpu->arch.fp.fpscr);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
				val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
		case KVM_REG_PPC_DEBUG_INST:
			*val = get_reg_val(id, INS_TW);
			break;
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp) {
				r = -ENXIO;
				break;
			}
			*val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			*val = get_reg_val(id, vcpu->arch.fscr);
			break;
		case KVM_REG_PPC_TAR:
			*val = get_reg_val(id, vcpu->arch.tar);
			break;
		case KVM_REG_PPC_EBBHR:
			*val = get_reg_val(id, vcpu->arch.ebbhr);
			break;
		case KVM_REG_PPC_EBBRR:
			*val = get_reg_val(id, vcpu->arch.ebbrr);
			break;
		case KVM_REG_PPC_BESCR:
			*val = get_reg_val(id, vcpu->arch.bescr);
			break;
		case KVM_REG_PPC_IC:
			*val = get_reg_val(id, vcpu->arch.ic);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}
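/*
 * Userspace reaches these helpers through the KVM_GET_ONE_REG and
 * KVM_SET_ONE_REG vcpu ioctls.  As an illustrative sketch (not part of
 * this file; vcpu_fd is a hypothetical vcpu file descriptor), reading
 * the TAR register from a VMM would look like:
 *
 *	__u64 tar;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_PPC_TAR,
 *		.addr = (__u64)(unsigned long)&tar,
 *	};
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
 *		err(1, "KVM_GET_ONE_REG");
 *
 * The register's size is encoded in the id; the generic PPC ioctl code
 * copies val to/from the user buffer at reg.addr.
 */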
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			kvmppc_set_dar(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_DSISR:
			kvmppc_set_dsisr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_FPSCR:
			vcpu->arch.fp.fpscr = set_reg_val(id, *val);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
				vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp) {
				r = -ENXIO;
				break;
			}
			r = kvmppc_xics_set_icp(vcpu,
						set_reg_val(id, *val));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			vcpu->arch.fscr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_TAR:
			vcpu->arch.tar = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBHR:
			vcpu->arch.ebbhr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBRR:
			vcpu->arch.ebbrr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_BESCR:
			vcpu->arch.bescr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_IC:
			vcpu->arch.ic = set_reg_val(id, *val);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr);

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	vcpu->guest_debug = dbg->control;
	return 0;
}

void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
	kvmppc_core_queue_dec(vcpu);
	kvm_vcpu_kick(vcpu);
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
}
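/*
 * Memory slot, dirty-log and MMU notifier operations are forwarded to
 * the active backend as well, since HV and PR KVM manage guest
 * mappings very differently.
 */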
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
	kvm->arch.kvm_ops->free_memslot(free, dont);
}

int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return kvm->arch.kvm_ops->create_memslot(slot, npages);
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem)
{
	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new)
{
	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new);
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->unmap_hva(kvm, hva);
}
EXPORT_SYMBOL_GPL(kvm_unmap_hva);

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->age_hva(kvm, start, end);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}

int kvmppc_core_init_vm(struct kvm *kvm)
{

#ifdef CONFIG_PPC64
	INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
#endif

	return kvm->arch.kvm_ops->init_vm(kvm);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);

#ifdef CONFIG_PPC64
	kvmppc_rtas_tokens_free(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif
}
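/*
 * H_LOGICAL_CI_LOAD and H_LOGICAL_CI_STORE implement cache-inhibited
 * (MMIO) accesses on behalf of the guest.  They are satisfied from
 * in-kernel devices on the MMIO bus; anything the kernel cannot handle
 * returns H_TOO_HARD so the hypercall can be completed elsewhere.
 */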
int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	u64 buf;
	int srcu_idx;
	int ret;

	if (!is_power_of_2(size) || (size > sizeof(buf)))
		return H_TOO_HARD;

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	switch (size) {
	case 1:
		kvmppc_set_gpr(vcpu, 4, *(u8 *)&buf);
		break;

	case 2:
		kvmppc_set_gpr(vcpu, 4, be16_to_cpu(*(__be16 *)&buf));
		break;

	case 4:
		kvmppc_set_gpr(vcpu, 4, be32_to_cpu(*(__be32 *)&buf));
		break;

	case 8:
		kvmppc_set_gpr(vcpu, 4, be64_to_cpu(*(__be64 *)&buf));
		break;

	default:
		BUG();
	}

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_load);

int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	unsigned long val = kvmppc_get_gpr(vcpu, 6);
	u64 buf;
	int srcu_idx;
	int ret;

	switch (size) {
	case 1:
		*(u8 *)&buf = val;
		break;

	case 2:
		*(__be16 *)&buf = cpu_to_be16(val);
		break;

	case 4:
		*(__be32 *)&buf = cpu_to_be32(val);
		break;

	case 8:
		*(__be64 *)&buf = cpu_to_be64(val);
		break;

	default:
		return H_TOO_HARD;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_store);

int kvmppc_core_check_processor_compat(void)
{
	/*
	 * We always return 0 for book3s. We check
	 * for compatibility while loading the HV
	 * or PR module
	 */
	return 0;
}

int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
{
	return kvm->arch.kvm_ops->hcall_implemented(hcall);
}

static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		return r;
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	r = kvmppc_book3s_init_pr();
#endif
	return r;
}

static void kvmppc_book3s_exit(void)
{
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kvmppc_book3s_exit_pr();
#endif
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);

/* On 32bit this is our one and only kernel module */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif