Lines Matching full:arch

14  * This file is derived from arch/powerpc/kvm/book3s.c,
243 cpu = READ_ONCE(vcpu->arch.thread_cpu);
280 * Updates to busy_stolen are protected by arch.tbacct_lock;
320 struct kvmppc_vcore *vc = vcpu->arch.vcore;
325 if (vcpu->arch.busy_preempt != TB_NIL) {
326 WARN_ON_ONCE(vcpu->arch.state != KVMPPC_VCPU_BUSY_IN_HOST);
327 vc->stolen_tb += mftb() - vcpu->arch.busy_preempt;
328 vcpu->arch.busy_preempt = TB_NIL;
344 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
345 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
346 vcpu->arch.busy_preempt != TB_NIL) {
347 vcpu->arch.busy_stolen += now - vcpu->arch.busy_preempt;
348 vcpu->arch.busy_preempt = TB_NIL;
350 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
355 struct kvmppc_vcore *vc = vcpu->arch.vcore;
364 WARN_ON_ONCE(vcpu->arch.state == KVMPPC_VCPU_RUNNABLE);
370 vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
371 vcpu->arch.busy_preempt = mftb();
380 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
381 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
382 vcpu->arch.busy_preempt = now;
383 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
388 vcpu->arch.pvr = pvr;
418 struct kvmppc_vcore *vc = vcpu->arch.vcore;
492 vcpu->arch.regs.nip, vcpu->arch.shregs.msr, vcpu->arch.trap);
498 vcpu->arch.regs.ctr, vcpu->arch.regs.link);
500 vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
502 vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
504 vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
506 vcpu->arch.regs.ccr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr);
507 pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
509 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
510 pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
511 for (r = 0; r < vcpu->arch.slb_max; ++r)
513 vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
515 vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
516 vcpu->arch.last_inst);
536 spin_lock(&vcpu->arch.vpa_update_lock);
542 spin_unlock(&vcpu->arch.vpa_update_lock);
605 spin_lock(&tvcpu->arch.vpa_update_lock);
618 vpap = &tvcpu->arch.vpa;
629 if (!vpa_is_registered(&tvcpu->arch.vpa))
632 vpap = &tvcpu->arch.dtl;
639 if (!vpa_is_registered(&tvcpu->arch.vpa))
642 vpap = &tvcpu->arch.slb_shadow;
649 if (vpa_is_registered(&tvcpu->arch.dtl) ||
650 vpa_is_registered(&tvcpu->arch.slb_shadow))
653 vpap = &tvcpu->arch.vpa;
658 vpap = &tvcpu->arch.dtl;
663 vpap = &tvcpu->arch.slb_shadow;
674 spin_unlock(&tvcpu->arch.vpa_update_lock);
697 spin_unlock(&vcpu->arch.vpa_update_lock);
702 spin_lock(&vcpu->arch.vpa_update_lock);
734 if (!(vcpu->arch.vpa.update_pending ||
735 vcpu->arch.slb_shadow.update_pending ||
736 vcpu->arch.dtl.update_pending))
739 spin_lock(&vcpu->arch.vpa_update_lock);
740 if (vcpu->arch.vpa.update_pending) {
741 kvmppc_update_vpa(vcpu, &vcpu->arch.vpa, &old_vpa);
748 if (vcpu->arch.vpa.pinned_addr) {
749 init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
751 kvmhv_nestedv2_set_vpa(vcpu, __pa(vcpu->arch.vpa.pinned_addr));
754 if (vcpu->arch.dtl.update_pending) {
755 kvmppc_update_vpa(vcpu, &vcpu->arch.dtl, &old_vpa);
759 vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
760 vcpu->arch.dtl_index = 0;
762 if (vcpu->arch.slb_shadow.update_pending) {
763 kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow, &old_vpa);
769 spin_unlock(&vcpu->arch.vpa_update_lock);
799 dt = vcpu->arch.dtl_ptr;
806 dt->processor_id = cpu_to_be16(pcpu + vcpu->arch.ptid);
813 dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr);
816 if (dt == vcpu->arch.dtl.pinned_end)
817 dt = vcpu->arch.dtl.pinned_addr;
818 vcpu->arch.dtl_ptr = dt;
821 vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index);
823 /* vcpu->arch.dtl.dirty is set by the caller */
835 vpa = vcpu->arch.vpa.pinned_addr;
842 stolen = core_stolen - vcpu->arch.stolen_logged;
843 vcpu->arch.stolen_logged = core_stolen;
844 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
845 stolen += vcpu->arch.busy_stolen;
846 vcpu->arch.busy_stolen = 0;
847 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
853 vcpu->arch.vpa.dirty = true;
864 vpa = vcpu->arch.vpa.pinned_addr;
869 stolen_delta = stolen - vcpu->arch.stolen_logged;
870 vcpu->arch.stolen_logged = stolen;
876 vcpu->arch.vpa.dirty = true;
885 if (vcpu->arch.doorbell_request)
895 vc = vcpu->arch.vcore;
946 if (!vcpu->kvm->arch.dawr1_enabled)
1045 struct kvmppc_vcore *vcore = target->arch.vcore;
1060 if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
1075 spin_lock(&vcpu->arch.vpa_update_lock);
1076 lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
1079 spin_unlock(&vcpu->arch.vpa_update_lock);
1104 do_h_rpt_invalidate_prt(pid, vcpu->arch.nested->shadow_lpid,
1140 do_h_rpt_invalidate_prt(id, vcpu->kvm->arch.lpid,
1155 !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls))
1214 tvcpu->arch.prodded = 1;
1216 if (tvcpu->arch.ceded)
1239 if (list_empty(&kvm->arch.rtas_tokens))
1353 vcpu->arch.hcall_needed = 0;
1357 vcpu->arch.hcall_needed = 0;
1407 * Instead the kvm->arch.secure_guest flag is checked inside
1418 vcpu->arch.hcall_needed = 0;
1432 vcpu->arch.ceded = 1;
1434 if (vcpu->arch.prodded) {
1435 vcpu->arch.prodded = 0;
1437 vcpu->arch.ceded = 0;
1489 vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu);
1508 nthreads = vcpu->kvm->arch.emul_smt_mode;
1549 thr = vcpu->vcpu_id & (kvm->arch.emul_smt_mode - 1);
1556 if (arg >= kvm->arch.emul_smt_mode)
1561 if (!tvcpu->arch.doorbell_request) {
1562 tvcpu->arch.doorbell_request = 1;
1570 vcpu->arch.vcore->dpdes = 0;
1571 vcpu->arch.doorbell_request = 0;
1602 if (!(vcpu->arch.hfscr_permitted & HFSCR_PM))
1612 if (!(vcpu->arch.hfscr_permitted & HFSCR_EBB))
1622 if (!(vcpu->arch.hfscr_permitted & HFSCR_TM))
1649 vcpu->arch.trap, kvmppc_get_pc(vcpu),
1650 vcpu->arch.shregs.msr);
1653 run->hw.hardware_exit_reason = vcpu->arch.trap;
1658 switch (vcpu->arch.trap) {
1662 vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER;
1688 machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);
1696 if (!vcpu->kvm->arch.fwnmi_enabled) {
1706 run->hw.hardware_exit_reason = vcpu->arch.trap;
1710 if (vcpu->arch.mce_evt.disposition == MCE_DISPOSITION_RECOVERED)
1772 vcpu->arch.hcall_needed = 1;
1791 unlikely(vcpu->arch.fault_dsisr == HDSISR_CANARY)) {
1807 if (!(vcpu->arch.fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT))) {
1810 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
1816 vsid = vcpu->kvm->arch.vrma_slb_v;
1818 vsid = vcpu->arch.fault_gpa;
1820 err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar,
1821 vsid, vcpu->arch.fault_dsisr, true);
1829 vcpu->arch.fault_dar, err);
1838 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
1839 vcpu->arch.fault_dsisr = __kvmppc_get_msr_hv(vcpu) &
1849 vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
1854 if (!(vcpu->arch.fault_dsisr & SRR1_ISI_NOPT)) {
1856 vcpu->arch.fault_dsisr |
1863 vsid = vcpu->kvm->arch.vrma_slb_v;
1865 vsid = vcpu->arch.fault_gpa;
1867 err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar,
1868 vsid, vcpu->arch.fault_dsisr, false);
1889 if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED)
1890 vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ?
1891 swab32(vcpu->arch.emul_inst) :
1892 vcpu->arch.emul_inst;
1959 vcpu->arch.trap, kvmppc_get_pc(vcpu),
1961 run->hw.hardware_exit_reason = vcpu->arch.trap;
1987 vcpu->arch.trap, kvmppc_get_pc(vcpu),
1992 switch (vcpu->arch.trap) {
2009 vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER;
2027 machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);
2042 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
2043 vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) &
2046 vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
2071 vcpu->arch.trap = 0;
2107 sregs->pvr = vcpu->arch.pvr;
2108 for (i = 0; i < vcpu->arch.slb_max; i++) {
2109 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
2110 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
2122 if (sregs->pvr != vcpu->arch.pvr)
2126 for (i = 0; i < vcpu->arch.slb_nr; i++) {
2128 vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
2129 vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
2133 vcpu->arch.slb_max = j;
2185 struct kvmppc_vcore *vc = vcpu->arch.vcore;
2215 if (vcpu->arch.vcore != vc)
2218 vcpu->arch.intr_msr |= MSR_LE;
2220 vcpu->arch.intr_msr &= ~MSR_LE;
2244 *val = get_reg_val(id, vcpu->arch.dabr);
2247 *val = get_reg_val(id, vcpu->arch.dabrx);
2275 *val = get_reg_val(id, vcpu->arch.mmcrs);
2286 *val = get_reg_val(id, vcpu->arch.spmc[i]);
2317 *val = get_reg_val(id, vcpu->arch.doorbell_request);
2319 *val = get_reg_val(id, vcpu->arch.vcore->dpdes);
2349 *val = get_reg_val(id, vcpu->arch.csigr);
2352 *val = get_reg_val(id, vcpu->arch.tacr);
2355 *val = get_reg_val(id, vcpu->arch.tcscr);
2361 *val = get_reg_val(id, vcpu->arch.acop);
2367 *val = get_reg_val(id, vcpu->arch.tid);
2370 *val = get_reg_val(id, vcpu->arch.psscr);
2373 spin_lock(&vcpu->arch.vpa_update_lock);
2374 *val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
2375 spin_unlock(&vcpu->arch.vpa_update_lock);
2378 spin_lock(&vcpu->arch.vpa_update_lock);
2379 val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa;
2380 val->vpaval.length = vcpu->arch.slb_shadow.len;
2381 spin_unlock(&vcpu->arch.vpa_update_lock);
2384 spin_lock(&vcpu->arch.vpa_update_lock);
2385 val->vpaval.addr = vcpu->arch.dtl.next_gpa;
2386 val->vpaval.length = vcpu->arch.dtl.len;
2387 spin_unlock(&vcpu->arch.vpa_update_lock);
2401 *val = get_reg_val(id, vcpu->arch.tfhar);
2404 *val = get_reg_val(id, vcpu->arch.tfiar);
2407 *val = get_reg_val(id, vcpu->arch.texasr);
2411 *val = get_reg_val(id, vcpu->arch.gpr_tm[i]);
2419 val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j];
2422 val->vval = vcpu->arch.vr_tm.vr[i-32];
2429 *val = get_reg_val(id, vcpu->arch.cr_tm);
2432 *val = get_reg_val(id, vcpu->arch.xer_tm);
2435 *val = get_reg_val(id, vcpu->arch.lr_tm);
2438 *val = get_reg_val(id, vcpu->arch.ctr_tm);
2441 *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr);
2444 *val = get_reg_val(id, vcpu->arch.amr_tm);
2447 *val = get_reg_val(id, vcpu->arch.ppr_tm);
2450 *val = get_reg_val(id, vcpu->arch.vrsave_tm);
2454 *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]);
2459 *val = get_reg_val(id, vcpu->arch.dscr_tm);
2462 *val = get_reg_val(id, vcpu->arch.tar_tm);
2472 *val = get_reg_val(id, vcpu->arch.online);
2475 *val = get_reg_val(id, vcpu->kvm->arch.l1_ptcr);
2502 vcpu->arch.dabr = set_reg_val(id, *val);
2505 vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP;
2533 vcpu->arch.mmcrs = set_reg_val(id, *val);
2544 vcpu->arch.spmc[i] = set_reg_val(id, *val);
2569 vcpu->arch.doorbell_request = set_reg_val(id, *val) & 1;
2571 vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
2604 vcpu->arch.csigr = set_reg_val(id, *val);
2607 vcpu->arch.tacr = set_reg_val(id, *val);
2610 vcpu->arch.tcscr = set_reg_val(id, *val);
2616 vcpu->arch.acop = set_reg_val(id, *val);
2622 vcpu->arch.tid = set_reg_val(id, *val);
2625 vcpu->arch.psscr = set_reg_val(id, *val) & PSSCR_GUEST_VIS;
2630 if (!addr && (vcpu->arch.slb_shadow.next_gpa ||
2631 vcpu->arch.dtl.next_gpa))
2633 r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca));
2639 if (addr && !vcpu->arch.vpa.next_gpa)
2641 r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len);
2648 !vcpu->arch.vpa.next_gpa))
2651 r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
2684 vcpu->arch.tfhar = set_reg_val(id, *val);
2687 vcpu->arch.tfiar = set_reg_val(id, *val);
2690 vcpu->arch.texasr = set_reg_val(id, *val);
2694 vcpu->arch.gpr_tm[i] = set_reg_val(id, *val);
2702 vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j];
2705 vcpu->arch.vr_tm.vr[i-32] = val->vval;
2711 vcpu->arch.cr_tm = set_reg_val(id, *val);
2714 vcpu->arch.xer_tm = set_reg_val(id, *val);
2717 vcpu->arch.lr_tm = set_reg_val(id, *val);
2720 vcpu->arch.ctr_tm = set_reg_val(id, *val);
2723 vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val);
2726 vcpu->arch.amr_tm = set_reg_val(id, *val);
2729 vcpu->arch.ppr_tm = set_reg_val(id, *val);
2732 vcpu->arch.vrsave_tm = set_reg_val(id, *val);
2736 vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val);
2741 vcpu->arch.dscr_tm = set_reg_val(id, *val);
2744 vcpu->arch.tar_tm = set_reg_val(id, *val);
2755 if (i && !vcpu->arch.online)
2756 atomic_inc(&vcpu->arch.vcore->online_count);
2757 else if (!i && vcpu->arch.online)
2758 atomic_dec(&vcpu->arch.vcore->online_count);
2759 vcpu->arch.online = i;
2762 vcpu->kvm->arch.l1_ptcr = set_reg_val(id, *val);
2802 vcore->lpcr = kvm->arch.lpcr;
2816 {"vcpu_entry", offsetof(struct kvm_vcpu, arch.vcpu_entry)},
2817 {"guest_entry", offsetof(struct kvm_vcpu, arch.guest_entry)},
2818 {"in_guest", offsetof(struct kvm_vcpu, arch.in_guest)},
2819 {"guest_exit", offsetof(struct kvm_vcpu, arch.guest_exit)},
2820 {"vcpu_exit", offsetof(struct kvm_vcpu, arch.vcpu_exit)},
2821 {"hypercall", offsetof(struct kvm_vcpu, arch.hcall)},
2822 {"page_fault", offsetof(struct kvm_vcpu, arch.pg_fault)},
2824 {"rm_entry", offsetof(struct kvm_vcpu, arch.rm_entry)},
2825 {"rm_intr", offsetof(struct kvm_vcpu, arch.rm_intr)},
2826 {"rm_exit", offsetof(struct kvm_vcpu, arch.rm_exit)},
2827 {"guest", offsetof(struct kvm_vcpu, arch.guest_time)},
2828 {"cede", offsetof(struct kvm_vcpu, arch.cede_time)},
2972 vcpu->arch.shared = &vcpu->arch.shregs;
2979 vcpu->arch.shared_big_endian = true;
2981 vcpu->arch.shared_big_endian = false;
2986 err = kvmhv_nestedv2_vcpu_create(vcpu, &vcpu->arch.nestedv2_io);
3000 spin_lock_init(&vcpu->arch.vpa_update_lock);
3001 spin_lock_init(&vcpu->arch.tbacct_lock);
3002 vcpu->arch.busy_preempt = TB_NIL;
3004 vcpu->arch.intr_msr = MSR_SF | MSR_ME;
3029 vcpu->arch.hfscr |= HFSCR_TM;
3031 vcpu->arch.hfscr_permitted = kvmppc_get_hfscr_hv(vcpu);
3040 vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
3042 init_waitqueue_head(&vcpu->arch.cpu_run);
3048 if (id >= (KVM_MAX_VCPUS * kvm->arch.emul_smt_mode)) {
3052 BUG_ON(kvm->arch.smt_mode != 1);
3056 core = id / kvm->arch.smt_mode;
3059 vcore = kvm->arch.vcores[core];
3070 id & ~(kvm->arch.smt_mode - 1));
3071 mutex_lock(&kvm->arch.mmu_setup_lock);
3072 kvm->arch.vcores[core] = vcore;
3073 kvm->arch.online_vcores++;
3074 mutex_unlock(&kvm->arch.mmu_setup_lock);
3085 vcpu->arch.vcore = vcore;
3086 vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid;
3087 vcpu->arch.thread_cpu = -1;
3088 vcpu->arch.prev_cpu = -1;
3090 vcpu->arch.cpu_type = KVM_CPU_3S_64;
3123 if (!kvm->arch.online_vcores) {
3124 kvm->arch.smt_mode = smt_mode;
3125 kvm->arch.emul_smt_mode = esmt;
3142 spin_lock(&vcpu->arch.vpa_update_lock);
3143 unpin_vpa(vcpu->kvm, &vcpu->arch.dtl);
3144 unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow);
3145 unpin_vpa(vcpu->kvm, &vcpu->arch.vpa);
3146 spin_unlock(&vcpu->arch.vpa_update_lock);
3148 kvmhv_nestedv2_vcpu_free(vcpu, &vcpu->arch.nestedv2_io);
3169 hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL);
3170 vcpu->arch.timer_running = 1;
3180 if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
3182 spin_lock_irq(&vcpu->arch.tbacct_lock);
3184 vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
3185 vcpu->arch.stolen_logged;
3186 vcpu->arch.busy_preempt = now;
3187 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
3188 spin_unlock_irq(&vcpu->arch.tbacct_lock);
3190 WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], NULL);
3242 struct kvm_nested_guest *nested = vcpu->arch.nested;
3249 need_tlb_flush = &kvm->arch.need_tlb_flush;
3287 if (kvm->arch.lpcr & LPCR_GTSE)
3295 struct kvm_nested_guest *nested = vcpu->arch.nested;
3303 prev_cpu = nested->prev_cpu[vcpu->arch.nested_vcpu_id];
3305 prev_cpu = vcpu->arch.prev_cpu;
3328 nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu;
3330 vcpu->arch.prev_cpu = pcpu;
3341 if (vcpu->arch.timer_running) {
3342 hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
3343 vcpu->arch.timer_running = 0;
3345 cpu += vcpu->arch.ptid;
3347 vcpu->arch.thread_cpu = cpu;
3587 if (signal_pending(vcpu->arch.run_task))
3588 vcpu->arch.ret = -EINTR;
3589 else if (vcpu->arch.vpa.update_pending ||
3590 vcpu->arch.slb_shadow.update_pending ||
3591 vcpu->arch.dtl.update_pending)
3592 vcpu->arch.ret = RESUME_GUEST;
3596 wake_up(&vcpu->arch.cpu_run);
3610 if (!pvc->n_runnable || !pvc->kvm->arch.mmu_ready) {
3639 if (!vc->kvm->arch.mmu_ready)
3642 if (signal_pending(vcpu->arch.run_task))
3662 * so any vcpus becoming runnable will have their arch.trap
3674 if (vcpu->arch.trap)
3676 vcpu->arch.run_task);
3678 vcpu->arch.ret = ret;
3679 vcpu->arch.trap = 0;
3682 if (is_kvmppc_resume_guest(vcpu->arch.ret)) {
3683 if (vcpu->arch.pending_exceptions)
3685 if (vcpu->arch.ceded)
3691 wake_up(&vcpu->arch.cpu_run);
3707 wake_up(&vcpu->arch.cpu_run);
3805 if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE)
3829 vcpu->arch.ret = -EBUSY;
3831 wake_up(&vcpu->arch.cpu_run);
3963 * It updates vcpu->cpu and vcpu->arch.thread_cpu
3972 if (!vcpu->arch.ptid)
3974 active |= 1 << (thr + vcpu->arch.ptid);
4103 struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
4107 vcpu->arch.vpa.dirty = 1;
4170 vcpu->arch.l1_to_l2_cs = l1_to_l2_ns;
4171 vcpu->arch.l2_to_l1_cs = l2_to_l1_ns;
4172 vcpu->arch.l2_runtime_agg = l2_runtime_ns;
4196 struct kvm_vcpu_arch *arch;
4200 arch = &vcpu->arch;
4201 return arch->l1_to_l2_cs;
4211 struct kvm_vcpu_arch *arch;
4215 arch = &vcpu->arch;
4216 return arch->l2_to_l1_cs;
4226 struct kvm_vcpu_arch *arch;
4230 arch = &vcpu->arch;
4231 return arch->l2_runtime_agg;
4257 if (vcpu->arch.doorbell_request) {
4258 vcpu->arch.doorbell_request = 0;
4262 io = &vcpu->arch.nestedv2_io;
4275 accumulate_time(vcpu, &vcpu->arch.in_guest);
4276 rc = plpar_guest_run_vcpu(0, vcpu->kvm->arch.lpid, vcpu->vcpu_id,
4289 accumulate_time(vcpu, &vcpu->arch.guest_exit);
4338 if (vcpu->arch.psscr != host_psscr)
4339 mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr);
4344 vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
4346 if (vcpu->arch.nested) {
4347 hvregs.lpid = vcpu->arch.nested->shadow_lpid;
4348 hvregs.vcpu_token = vcpu->arch.nested_vcpu_id;
4350 hvregs.lpid = vcpu->kvm->arch.lpid;
4361 if (vcpu->arch.doorbell_request)
4362 vcpu->arch.doorbell_request = 0;
4380 mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
4381 mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
4383 accumulate_time(vcpu, &vcpu->arch.in_guest);
4385 __pa(&vcpu->arch.regs));
4386 accumulate_time(vcpu, &vcpu->arch.guest_exit);
4389 vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
4390 vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
4391 vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);
4392 vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR);
4400 vcpu->arch.dec_expires = dec + (*tb + kvmppc_get_tb_offset(vcpu));
4405 if (vcpu->arch.psscr != host_psscr)
4418 struct kvm_nested_guest *nested = vcpu->arch.nested;
4430 vcpu->arch.ceded = 0;
4477 vcpu->arch.ceded = 0;
4509 vcpu->arch.slb_max = 0;
4526 prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
4527 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
4532 finish_wait(&vcpu->arch.cpu_run, &wait);
4558 return vcpu->arch.irq_pending || vcpu->arch.xive_saved_state.pipr <
4559 vcpu->arch.xive_saved_state.cppr;
4570 if (vcpu->arch.pending_exceptions || vcpu->arch.prodded ||
4579 if (!vcpu->arch.ceded || kvmppc_vcpu_woken(vcpu))
4724 mutex_lock(&kvm->arch.mmu_setup_lock);
4725 if (!kvm->arch.mmu_ready) {
4731 kvm->arch.mmu_ready = 1;
4734 mutex_unlock(&kvm->arch.mmu_setup_lock);
4748 vcpu->arch.ret = RESUME_GUEST;
4749 vcpu->arch.trap = 0;
4755 vc = vcpu->arch.vcore;
4757 vcpu->arch.ceded = 0;
4758 vcpu->arch.run_task = current;
4759 vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
4760 vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
4761 vcpu->arch.busy_preempt = TB_NIL;
4762 WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], vcpu);
4783 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
4786 if (!vcpu->kvm->arch.mmu_ready) {
4794 vcpu->arch.ret = r;
4808 if (signal_pending(v->arch.run_task)) {
4812 v->arch.ret = -EINTR;
4813 wake_up(&v->arch.cpu_run);
4816 if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
4821 n_ceded += v->arch.ceded;
4823 v->arch.ceded = 0;
4840 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
4849 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
4853 vcpu->arch.ret = -EINTR;
4860 wake_up(&v->arch.cpu_run);
4865 return vcpu->arch.ret;
4877 struct kvm_nested_guest *nested = vcpu->arch.nested;
4884 vcpu->arch.ret = RESUME_GUEST;
4885 vcpu->arch.trap = 0;
4887 vc = vcpu->arch.vcore;
4888 vcpu->arch.ceded = 0;
4889 vcpu->arch.run_task = current;
4890 vcpu->arch.last_inst = KVM_INST_FETCH_FAILED;
4893 if (unlikely(!kvm->arch.mmu_ready)) {
4898 vcpu->arch.ret = r;
4916 vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
4920 if (need_resched() || !kvm->arch.mmu_ready)
4924 vcpu->arch.thread_cpu = pcpu;
4941 &vcpu->arch.pending_exceptions) ||
4955 * L1's copy of L2's LPCR (vcpu->arch.vcore->lpcr) can get its MER bit
4966 } else if (vcpu->arch.pending_exceptions ||
4968 vcpu->arch.ret = RESUME_HOST;
4972 if (vcpu->arch.timer_running) {
4973 hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
4974 vcpu->arch.timer_running = 0;
4991 vcpu->arch.trap = trap;
5001 vcpu->arch.thread_cpu = -1;
5002 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
5041 vcpu->arch.ret = r;
5052 vcpu->arch.ret = -EINTR;
5065 vcpu->arch.ceded = 0;
5070 return vcpu->arch.ret;
5075 vcpu->arch.ret = -EINTR;
5078 vcpu->arch.thread_cpu = -1;
5079 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
5093 start_timing(vcpu, &vcpu->arch.vcpu_entry);
5095 if (!vcpu->arch.sane) {
5125 if (!vcpu->arch.online) {
5126 atomic_inc(&vcpu->arch.vcore->online_count);
5127 vcpu->arch.online = 1;
5133 atomic_inc(&kvm->arch.vcpus_running);
5155 vcpu->arch.waitp = &vcpu->arch.vcore->wait;
5156 vcpu->arch.pgdir = kvm->mm->pgd;
5157 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
5160 accumulate_time(vcpu, &vcpu->arch.guest_entry);
5163 vcpu->arch.vcore->lpcr);
5168 accumulate_time(vcpu, &vcpu->arch.hcall);
5185 accumulate_time(vcpu, &vcpu->arch.pg_fault);
5188 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
5197 accumulate_time(vcpu, &vcpu->arch.vcpu_exit);
5199 vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
5200 atomic_dec(&kvm->arch.vcpus_running);
5312 spin_lock(&vcpu->arch.vpa_update_lock);
5313 kvmppc_harvest_vpa_dirty(&vcpu->arch.vpa, memslot, buf);
5314 kvmppc_harvest_vpa_dirty(&vcpu->arch.dtl, memslot, buf);
5315 spin_unlock(&vcpu->arch.vpa_update_lock);
5330 vfree(slot->arch.rmap);
5331 slot->arch.rmap = NULL;
5340 unsigned long size = array_size(new->npages, sizeof(*new->arch.rmap));
5345 new->arch.rmap = vzalloc(size);
5346 if (!new->arch.rmap)
5349 new->arch.rmap = old->arch.rmap;
5367 atomic64_inc(&kvm->arch.mmio_update);
5388 if (!kvm->arch.secure_guest)
5409 * Update LPCR values in kvm->arch and in vcores.
5410 * Caller must hold kvm->arch.mmu_setup_lock (for mutual exclusion
5411 * of kvm->arch.lpcr update).
5418 if ((kvm->arch.lpcr & mask) == lpcr)
5421 kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr;
5424 struct kvmppc_vcore *vc = kvm->arch.vcores[i];
5432 if (++cores_done >= kvm->arch.online_vcores)
5451 dw0 = ((kvm->arch.vrma_slb_v & SLB_VSID_L) >> 1) |
5452 ((kvm->arch.vrma_slb_v & SLB_VSID_LP) << 1);
5454 dw0 |= kvm->arch.sdr1;
5457 dw1 = kvm->arch.process_table;
5460 __pa(kvm->arch.pgtable) | RADIX_PGD_INDEX_SIZE;
5461 dw1 = PATB_GR | kvm->arch.process_table;
5463 kvmhv_set_ptbl_entry(kvm->arch.lpid, dw0, dw1);
5468 * Must be called with kvm->arch.mmu_setup_lock held.
5482 if (!kvm->arch.hpt.virt) {
5531 kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
5543 /* Order updates to kvm->arch.lpcr etc. vs. mmu_ready */
5557 * Must be called with kvm->arch.mmu_setup_lock held and
5567 kvm->arch.process_table = 0;
5570 kvm->arch.radix = 0;
5584 * Must be called with kvm->arch.mmu_setup_lock held and
5598 kvm->arch.radix = 1;
5600 kvmppc_free_hpt(&kvm->arch.hpt);
5607 (kvm->arch.host_lpcr & LPCR_HAIL))
5703 mutex_init(&kvm->arch.uvmem_lock);
5704 INIT_LIST_HEAD(&kvm->arch.uvmem_pfns);
5705 mutex_init(&kvm->arch.mmu_setup_lock);
5713 kvm->arch.lpid = lpid;
5742 kvm->arch.lpid = guest_id;
5754 cpumask_setall(&kvm->arch.need_tlb_flush);
5757 memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls,
5758 sizeof(kvm->arch.enabled_hcalls));
5761 kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
5765 kvm->arch.host_lpid = mfspr(SPRN_LPID);
5766 kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
5777 kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
5805 kvm->arch.radix = 1;
5806 kvm->arch.mmu_ready = 1;
5811 (kvm->arch.host_lpcr & LPCR_HAIL))
5816 plpar_guest_delete(0, kvm->arch.lpid);
5818 kvmppc_free_lpid(kvm->arch.lpid);
5825 kvm->arch.lpcr = lpcr;
5828 kvm->arch.resize_hpt = NULL;
5838 kvm->arch.tlb_sets = 1;
5840 kvm->arch.tlb_sets = POWER9_TLB_SETS_RADIX; /* 128 */
5842 kvm->arch.tlb_sets = POWER9_TLB_SETS_HASH; /* 256 */
5844 kvm->arch.tlb_sets = POWER8_TLB_SETS; /* 512 */
5846 kvm->arch.tlb_sets = POWER7_TLB_SETS; /* 128 */
5863 kvm->arch.smt_mode = threads_per_subcore;
5865 kvm->arch.smt_mode = 1;
5866 kvm->arch.emul_smt_mode = 1;
5884 kfree(kvm->arch.vcores[i]);
5885 kvm->arch.online_vcores = 0;
5899 kvmppc_free_hpt(&kvm->arch.hpt);
5905 kvm->arch.process_table = 0;
5906 if (kvm->arch.secure_guest)
5907 uv_svm_terminate(kvm->arch.lpid);
5909 kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0);
5913 kvmhv_flush_lpid(kvm->arch.lpid);
5914 plpar_guest_delete(0, kvm->arch.lpid);
5916 kvmppc_free_lpid(kvm->arch.lpid);
5958 kfree(kvm->arch.pimap);
5984 pimap = kvm->arch.pimap;
5992 kvm->arch.pimap = pimap;
6076 if (!kvm->arch.pimap)
6079 pimap = kvm->arch.pimap;
6289 mutex_lock(&kvm->arch.mmu_setup_lock);
6291 if (kvm->arch.mmu_ready) {
6292 kvm->arch.mmu_ready = 0;
6295 if (atomic_read(&kvm->arch.vcpus_running)) {
6296 kvm->arch.mmu_ready = 1;
6309 kvm->arch.process_table = cfg->process_table;
6317 mutex_unlock(&kvm->arch.mmu_setup_lock);
6334 kvm->arch.nested_enable = true;
6351 if (rc && vcpu->arch.nested)
6370 if (rc && vcpu->arch.nested)
6396 kvm->arch.svm_enabled = 1;
6416 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
6419 mutex_lock(&kvm->arch.mmu_setup_lock);
6420 mmu_was_ready = kvm->arch.mmu_ready;
6421 if (kvm->arch.mmu_ready) {
6422 kvm->arch.mmu_ready = 0;
6425 if (atomic_read(&kvm->arch.vcpus_running)) {
6426 kvm->arch.mmu_ready = 1;
6443 uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);
6448 ret = uv_svm_terminate(kvm->arch.lpid);
6465 spin_lock(&vcpu->arch.vpa_update_lock);
6466 unpin_vpa_reset(kvm, &vcpu->arch.dtl);
6467 unpin_vpa_reset(kvm, &vcpu->arch.slb_shadow);
6468 unpin_vpa_reset(kvm, &vcpu->arch.vpa);
6469 spin_unlock(&vcpu->arch.vpa_update_lock);
6473 kvm->arch.secure_guest = 0;
6474 kvm->arch.mmu_ready = mmu_was_ready;
6476 mutex_unlock(&kvm->arch.mmu_setup_lock);
6487 kvm->arch.dawr1_enabled = true;
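
The matches around source lines 325-382 sketch a stolen-time accounting protocol: busy_stolen and busy_preempt are touched only under arch.tbacct_lock, and TB_NIL marks "no preemption timestamp pending". The following is a minimal userspace analogue of that shape, not the kernel code itself: fake_vcpu, the pthread mutex standing in for the spinlock, and the clock_gettime-based mftb() are all invented stand-ins; only the locking and TB_NIL discipline come from the listing.

/*
 * Userspace sketch of the busy_stolen pattern visible in the matches
 * around source lines 325-382.  All names here (fake_vcpu, mftb, ...)
 * are illustrative stand-ins, not the kernel's definitions.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define TB_NIL (~(uint64_t)0)        /* sentinel: no timebase recorded */

struct fake_vcpu {
	pthread_mutex_t tbacct_lock; /* stands in for arch.tbacct_lock */
	uint64_t busy_stolen;        /* accumulated "stolen" time */
	uint64_t busy_preempt;       /* timebase when preemption began */
};

/* Stand-in for the PowerPC mftb (move-from-timebase) instruction. */
static uint64_t mftb(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* "Start accounting" side: record when we were preempted. */
static void vcpu_preempted(struct fake_vcpu *v)
{
	pthread_mutex_lock(&v->tbacct_lock);
	v->busy_preempt = mftb();
	pthread_mutex_unlock(&v->tbacct_lock);
}

/* "Fold in stolen time" side, cf. the matches at source lines 344-350. */
static void vcpu_resumed(struct fake_vcpu *v)
{
	pthread_mutex_lock(&v->tbacct_lock);
	if (v->busy_preempt != TB_NIL) {
		v->busy_stolen += mftb() - v->busy_preempt;
		v->busy_preempt = TB_NIL;
	}
	pthread_mutex_unlock(&v->tbacct_lock);
}

int main(void)
{
	struct fake_vcpu v = {
		.tbacct_lock = PTHREAD_MUTEX_INITIALIZER,
		.busy_preempt = TB_NIL,
	};

	vcpu_preempted(&v);
	vcpu_resumed(&v);
	printf("stolen: %llu ns\n", (unsigned long long)v.busy_stolen);
	return 0;
}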
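
Similarly, the matches around source lines 5409-5432 describe propagating an LPCR change: the VM-wide value is updated once (the comment notes the caller must hold kvm->arch.mmu_setup_lock), then pushed to each online vcore, stopping once online_vcores have been visited. Below is a hedged userspace sketch of that propagation shape; fake_kvm, fake_vcore, MAX_CORES, and taking the setup mutex inside the helper are all assumptions made to keep the example self-contained, and only the mask/early-exit/loop-termination logic is taken from the listing.

/*
 * Userspace sketch of the LPCR-update pattern suggested by the matches
 * around source lines 5409-5432.  Invented types and names throughout.
 */
#include <pthread.h>
#include <stdint.h>

#define MAX_CORES 8

struct fake_vcore {
	pthread_mutex_t lock;
	uint64_t lpcr;
};

struct fake_kvm {
	pthread_mutex_t mmu_setup_lock; /* serializes lpcr updates */
	uint64_t lpcr;
	int online_vcores;
	struct fake_vcore *vcores[MAX_CORES];
};

/* Update the masked LPCR bits VM-wide, then push to every online vcore. */
static void update_lpcr(struct fake_kvm *kvm, uint64_t lpcr, uint64_t mask)
{
	int i, cores_done = 0;

	pthread_mutex_lock(&kvm->mmu_setup_lock);
	if ((kvm->lpcr & mask) == lpcr)
		goto out;                         /* nothing to change */

	kvm->lpcr = (kvm->lpcr & ~mask) | lpcr;
	for (i = 0; i < MAX_CORES; i++) {
		struct fake_vcore *vc = kvm->vcores[i];

		if (!vc)
			continue;
		pthread_mutex_lock(&vc->lock);
		vc->lpcr = (vc->lpcr & ~mask) | lpcr;
		pthread_mutex_unlock(&vc->lock);
		if (++cores_done >= kvm->online_vcores)
			break;                    /* all online vcores done */
	}
out:
	pthread_mutex_unlock(&kvm->mmu_setup_lock);
}

int main(void)
{
	struct fake_vcore vc = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct fake_kvm kvm = {
		.mmu_setup_lock = PTHREAD_MUTEX_INITIALIZER,
		.online_vcores = 1,
		.vcores = { &vc },
	};

	update_lpcr(&kvm, 0x8, 0x8); /* set one masked LPCR bit VM-wide */
	return 0;
}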