--- book3s_pr.c (94810ba4edc8fc49c68650306928245f6c0c99fa)
+++ book3s_pr.c (5deb8e7ad8ac7e3fcdfa042acff617f461b361c2)
 /*
  * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
  *
  * Authors:
  *    Alexander Graf <agraf@suse.de>
  *    Kevin Wolf <mail@kevin-wolf.de>
  *    Paul Mackerras <paulus@samba.org>
  *

--- 232 unchanged lines hidden ---

         /* The page will get remapped properly on its next fault */
         do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
 }

 /*****************************************/

 static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
 {
-        ulong smsr = vcpu->arch.shared->msr;
+        ulong guest_msr = kvmppc_get_msr(vcpu);
+        ulong smsr = guest_msr;

         /* Guest MSR values */
         smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE;
         /* Process MSR values */
         smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
         /* External providers the guest reserved */
-        smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
+        smsr |= (guest_msr & vcpu->arch.guest_owned_ext);
         /* 64-bit Process MSR values */
 #ifdef CONFIG_PPC_BOOK3S_64
         smsr |= MSR_ISF | MSR_HV;
 #endif
         vcpu->arch.shadow_msr = smsr;
 }
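Note on the conversion above: every bare load of vcpu->arch.shared->msr becomes a kvmppc_get_msr() call, and the value is cached in a local (guest_msr) so the byte-order conversion happens once per function rather than once per use. The shared page may now be kept in either endianness, keyed off the shared_big_endian flag that this same diff initialises in the vcpu-create path further down. A minimal sketch of the accessor shape this implies; the names match the calls in this file, but the bodies are an assumption, not the kernel's exact implementation:

/* Sketch only -- assumed shape of the endian-aware shared-page accessors. */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
        /* Book3S_64 PR may keep the shared struct in either byte order */
        return vcpu->arch.shared_big_endian;
#else
        return true;
#endif
}

static inline ulong kvmppc_get_msr(struct kvm_vcpu *vcpu)
{
        if (kvmppc_shared_big_endian(vcpu))
                return be64_to_cpu(vcpu->arch.shared->msr);
        else
                return le64_to_cpu(vcpu->arch.shared->msr);
}

static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
        if (kvmppc_shared_big_endian(vcpu))
                vcpu->arch.shared->msr = cpu_to_be64(val);
        else
                vcpu->arch.shared->msr = cpu_to_le64(val);
}

kvmppc_set_msr_fast() is the store-side counterpart used throughout this file; it only updates the shared struct, unlike the full MSR setter (see the note after kvmppc_set_msr_pr() below).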

 static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
 {
-        ulong old_msr = vcpu->arch.shared->msr;
+        ulong old_msr = kvmppc_get_msr(vcpu);

 #ifdef EXIT_DEBUG
         printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
 #endif

         msr &= to_book3s(vcpu)->msr_mask;
-        vcpu->arch.shared->msr = msr;
+        kvmppc_set_msr_fast(vcpu, msr);
         kvmppc_recalc_shadow_msr(vcpu);

         if (msr & MSR_POW) {
                 if (!vcpu->arch.pending_exceptions) {
                         kvm_vcpu_block(vcpu);
                         clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                         vcpu->stat.halt_wakeup++;

                         /* Unset POW bit after we woke up */
                         msr &= ~MSR_POW;
-                        vcpu->arch.shared->msr = msr;
+                        kvmppc_set_msr_fast(vcpu, msr);
                 }
         }

-        if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
+        if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) !=
             (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
                 kvmppc_mmu_flush_segments(vcpu);
                 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

                 /* Preload magic page segment when in kernel mode */
                 if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
                         struct kvm_vcpu_arch *a = &vcpu->arch;

--- 15 unchanged lines hidden ---

         if (vcpu->arch.magic_page_pa &&
             !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
                 /* going from RTAS to normal kernel code */
                 kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
                                      ~0xFFFUL);
         }

         /* Preload FPU if it's enabled */
-        if (vcpu->arch.shared->msr & MSR_FP)
+        if (kvmppc_get_msr(vcpu) & MSR_FP)
                 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
 }
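kvmppc_set_msr_pr() above is itself the Book3S PR backend behind the generic MSR setter, which is why the conversions inside it use kvmppc_set_msr_fast(): the fast variant presumably just stores the endian-fixed value, while the full setter would dispatch back into this function and re-run its segment-flush and wakeup side effects. A sketch of the assumed split (the body is an assumption, not quoted from the kernel):

/* Assumed relationship, for orientation only. */
static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
        /* dispatches to kvmppc_set_msr_pr() on Book3S PR guests */
        vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
}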

 void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
 {
         u32 host_pvr;

         vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;

--- 104 unchanged lines hidden ---

         kunmap_atomic(page);
         put_page(hpage);
 }

 static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
         ulong mp_pa = vcpu->arch.magic_page_pa;

-        if (!(vcpu->arch.shared->msr & MSR_SF))
+        if (!(kvmppc_get_msr(vcpu) & MSR_SF))
                 mp_pa = (uint32_t)mp_pa;

         if (unlikely(mp_pa) &&
             unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
                 return 1;
         }

         return kvm_is_visible_gfn(vcpu->kvm, gfn);

--- 4 unchanged lines hidden ---

 {
         bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
         bool iswrite = false;
         int r = RESUME_GUEST;
         int relocated;
         int page_found = 0;
         struct kvmppc_pte pte;
         bool is_mmio = false;
-        bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
-        bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
+        bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
+        bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false;
         u64 vsid;

         relocated = data ? dr : ir;
         if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
                 iswrite = true;

         /* Resolve real address if translation turned on */
         if (relocated) {
                 page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
         } else {
                 pte.may_execute = true;
                 pte.may_read = true;
                 pte.may_write = true;
                 pte.raddr = eaddr & KVM_PAM;
                 pte.eaddr = eaddr;
                 pte.vpage = eaddr >> 12;
                 pte.page_size = MMU_PAGE_64K;
         }

-        switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
+        switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
         case 0:
                 pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
                 break;
         case MSR_DR:
         case MSR_IR:
                 vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

-                if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
+                if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR)
                         pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
                 else
                         pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
                 pte.vpage |= vsid;

                 if (vsid == -1)
                         page_found = -EINVAL;
                 break;

--- 6 unchanged lines hidden ---

                  * so we can patch the executing code. This renders our guest
                  * NX-less.
                  */
                 pte.may_execute = !data;
         }

         if (page_found == -ENOENT) {
                 /* Page not found in guest PTE entries */
-                vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
-                vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr;
-                vcpu->arch.shared->msr |=
-                        vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
+                u64 ssrr1 = vcpu->arch.shadow_srr1;
+                u64 msr = kvmppc_get_msr(vcpu);
+                kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
+                kvmppc_set_dsisr(vcpu, vcpu->arch.fault_dsisr);
+                kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL));
                 kvmppc_book3s_queue_irqprio(vcpu, vec);
         } else if (page_found == -EPERM) {
                 /* Storage protection */
-                vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
-                vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
-                vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
-                vcpu->arch.shared->msr |=
-                        vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
+                u32 dsisr = vcpu->arch.fault_dsisr;
+                u64 ssrr1 = vcpu->arch.shadow_srr1;
+                u64 msr = kvmppc_get_msr(vcpu);
+                kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
+                dsisr = (dsisr & ~DSISR_NOHPTE) | DSISR_PROTFAULT;
+                kvmppc_set_dsisr(vcpu, dsisr);
+                kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL));
                 kvmppc_book3s_queue_irqprio(vcpu, vec);
         } else if (page_found == -EINVAL) {
                 /* Page not found in guest SLB */
-                vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
+                kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
                 kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
         } else if (!is_mmio &&
                    kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
                 if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
                         /*
                          * There is already a host HPTE there, presumably
                          * a read-only one for a page the guest thinks
                          * is writable, so get rid of it first.

--- 71 unchanged lines hidden ---

 static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
 {
         ulong srr0 = kvmppc_get_pc(vcpu);
         u32 last_inst = kvmppc_get_last_inst(vcpu);
         int ret;

         ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
         if (ret == -ENOENT) {
-                ulong msr = vcpu->arch.shared->msr;
+                ulong msr = kvmppc_get_msr(vcpu);

                 msr = kvmppc_set_field(msr, 33, 33, 1);
                 msr = kvmppc_set_field(msr, 34, 36, 0);
-                vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
+                msr = kvmppc_set_field(msr, 42, 47, 0);
+                kvmppc_set_msr_fast(vcpu, msr);
                 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
                 return EMULATE_AGAIN;
         }

         return EMULATE_DONE;
 }
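In the -ENOENT branch above, kvmppc_read_inst() fabricates an instruction storage interrupt by editing the guest MSR/SRR1 image field by field. kvmppc_set_field() works in IBM bit numbering, where bit 0 is the most significant bit of the 64-bit register. A hypothetical equivalent, only to make the bit arithmetic concrete (the helper name and body are illustrative, not the kernel's implementation):

/* Illustrative only: set bits msb..lsb (IBM numbering, bit 0 = MSB) to value. */
static inline u64 set_field_sketch(u64 reg, int msb, int lsb, u64 value)
{
        int shift = 63 - lsb;
        u64 mask = ((1ULL << (lsb - msb + 1)) - 1) << shift;

        return (reg & ~mask) | ((value << shift) & mask);
}

Under that reading, kvmppc_set_field(msr, 33, 33, 1) ORs in 1ULL << 30 (0x40000000) and the 34-36 and 42-47 calls zero those fields; the result is then written back through kvmppc_set_msr_fast() instead of a raw store.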

 static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)

--- 16 unchanged lines hidden ---

                              ulong msr)
 {
         struct thread_struct *t = &current->thread;

         /* When we have paired singles, we emulate in software */
         if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
                 return RESUME_GUEST;

-        if (!(vcpu->arch.shared->msr & msr)) {
+        if (!(kvmppc_get_msr(vcpu) & msr)) {
                 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                 return RESUME_GUEST;
         }

         if (msr == MSR_VSX) {
                 /* No VSX? Give an illegal instruction interrupt */
 #ifdef CONFIG_VSX
                 if (!cpu_has_feature(CPU_FTR_VSX))

--- 124 unchanged lines hidden ---

                         /*
                          * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
                          * so we can't use the NX bit inside the guest. Let's cross our fingers,
                          * that no guest that needs the dcbz hack does NX.
                          */
                         kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
                         r = RESUME_GUEST;
                 } else {
-                        vcpu->arch.shared->msr |= shadow_srr1 & 0x58000000;
+                        u64 msr = kvmppc_get_msr(vcpu);
+                        msr |= shadow_srr1 & 0x58000000;
+                        kvmppc_set_msr_fast(vcpu, msr);
                         kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                         r = RESUME_GUEST;
                 }
                 break;
         }
         case BOOK3S_INTERRUPT_DATA_STORAGE:
         {
                 ulong dar = kvmppc_get_fault_dar(vcpu);

--- 23 unchanged lines hidden ---

                  * protection faults due to us mapping a page read-only
                  * when the guest thinks it is writable.
                  */
                 if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
                         int idx = srcu_read_lock(&vcpu->kvm->srcu);
                         r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
                         srcu_read_unlock(&vcpu->kvm->srcu, idx);
                 } else {
-                        vcpu->arch.shared->dar = dar;
-                        vcpu->arch.shared->dsisr = fault_dsisr;
+                        kvmppc_set_dar(vcpu, dar);
+                        kvmppc_set_dsisr(vcpu, fault_dsisr);
                         kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                         r = RESUME_GUEST;
                 }
                 break;
         }
         case BOOK3S_INTERRUPT_DATA_SEGMENT:
                 if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
-                        vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
+                        kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
                         kvmppc_book3s_queue_irqprio(vcpu,
                                 BOOK3S_INTERRUPT_DATA_SEGMENT);
                 }
                 r = RESUME_GUEST;
                 break;
         case BOOK3S_INTERRUPT_INST_SEGMENT:
                 if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
                         kvmppc_book3s_queue_irqprio(vcpu,

--- 21 unchanged lines hidden ---

         case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
         {
                 enum emulation_result er;
                 ulong flags;

 program_interrupt:
                 flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;

-                if (vcpu->arch.shared->msr & MSR_PR) {
+                if (kvmppc_get_msr(vcpu) & MSR_PR) {
 #ifdef EXIT_DEBUG
                         printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
 #endif
                         if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
                             (INS_DCBZ & 0xfffffff7)) {
                                 kvmppc_core_queue_program(vcpu, flags);
                                 r = RESUME_GUEST;
                                 break;

--- 25 unchanged lines hidden ---

                 default:
                         BUG();
                 }
                 break;
         }
         case BOOK3S_INTERRUPT_SYSCALL:
                 if (vcpu->arch.papr_enabled &&
                     (kvmppc_get_last_sc(vcpu) == 0x44000022) &&
-                    !(vcpu->arch.shared->msr & MSR_PR)) {
+                    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
                         /* SC 1 papr hypercalls */
                         ulong cmd = kvmppc_get_gpr(vcpu, 3);
                         int i;

 #ifdef CONFIG_PPC_BOOK3S_64
                         if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
                                 r = RESUME_GUEST;
                                 break;

--- 15 unchanged lines hidden ---

                         u64 *gprs = run->osi.gprs;
                         int i;

                         run->exit_reason = KVM_EXIT_OSI;
                         for (i = 0; i < 32; i++)
                                 gprs[i] = kvmppc_get_gpr(vcpu, i);
                         vcpu->arch.osi_needed = 1;
                         r = RESUME_HOST_NV;
-                } else if (!(vcpu->arch.shared->msr & MSR_PR) &&
+                } else if (!(kvmppc_get_msr(vcpu) & MSR_PR) &&
                     (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
                         /* KVM PV hypercalls */
                         kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
                         r = RESUME_GUEST;
                 } else {
                         /* Guest syscalls */
                         vcpu->stat.syscall_exits++;
                         kvmppc_book3s_queue_irqprio(vcpu, exit_nr);

--- 24 unchanged lines hidden ---

                 default:
                         /* nothing to worry about - go again */
                         break;
                 }
                 break;
         }
         case BOOK3S_INTERRUPT_ALIGNMENT:
                 if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
-                        vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
-                                kvmppc_get_last_inst(vcpu));
-                        vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
-                                kvmppc_get_last_inst(vcpu));
+                        u32 last_inst = kvmppc_get_last_inst(vcpu);
+                        u32 dsisr;
+                        u64 dar;
+
+                        dsisr = kvmppc_alignment_dsisr(vcpu, last_inst);
+                        dar = kvmppc_alignment_dar(vcpu, last_inst);
+
+                        kvmppc_set_dsisr(vcpu, dsisr);
+                        kvmppc_set_dar(vcpu, dar);
+
                         kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                 }
                 r = RESUME_GUEST;
                 break;
         case BOOK3S_INTERRUPT_MACHINE_CHECK:
         case BOOK3S_INTERRUPT_TRACE:
                 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                 r = RESUME_GUEST;

--- 48 unchanged lines hidden ---

         sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
         if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
                 for (i = 0; i < 64; i++) {
                         sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
                         sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
                 }
         } else {
                 for (i = 0; i < 16; i++)
-                        sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];
+                        sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i);

                 for (i = 0; i < 8; i++) {
                         sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
                         sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
                 }
         }

         return 0;

--- 119 unchanged lines hidden ---

                 goto free_shadow_vcpu;

         err = -ENOMEM;
         p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
         if (!p)
                 goto uninit_vcpu;
         /* the real shared page fills the last 4k of our page */
         vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);

 #ifdef CONFIG_PPC_BOOK3S_64
+        /* Always start the shared struct in native endian mode */
+#ifdef __BIG_ENDIAN__
+        vcpu->arch.shared_big_endian = true;
+#else
+        vcpu->arch.shared_big_endian = false;
+#endif
+
         /*
          * Default to the same as the host if we're on sufficiently
          * recent machine that we have 1TB segments;
          * otherwise default to PPC970FX.
          */
         vcpu->arch.pvr = 0x3C0301;
         if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
                 vcpu->arch.pvr = mfspr(SPRN_PVR);
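The shared_big_endian initialisation a few lines up is where the flag consumed by all the accessors gets its starting value: the shared struct begins in the host's native byte order, since host code fills it in before the guest has expressed any preference. Assuming CONFIG_CPU_LITTLE_ENDIAN is the config symbol for little-endian powerpc builds, the compile-time #ifdef pair on __BIG_ENDIAN__ could equivalently be written as:

/* Hypothetical equivalent of the __BIG_ENDIAN__ #ifdef pair above. */
vcpu->arch.shared_big_endian = !IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN);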

--- 77 unchanged lines hidden ---


 #ifdef CONFIG_VSX
         /* Save VSX state in thread_struct */
         if (current->thread.regs->msr & MSR_VSX)
                 __giveup_vsx(current);
 #endif

         /* Preload FPU if it's enabled */
-        if (vcpu->arch.shared->msr & MSR_FP)
+        if (kvmppc_get_msr(vcpu) & MSR_FP)
                 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

         kvmppc_fix_ee_before_entry();

         ret = __kvmppc_vcpu_run(kvm_run, vcpu);

         /* No need for kvm_guest_exit. It's done in handle_exit.
            We also get here with interrupts enabled. */

--- 247 unchanged lines hidden ---