/linux/arch/powerpc/kvm/

booke_emulate.c
  26  vcpu->arch.regs.nip = vcpu->arch.shared->srr0;  in kvmppc_emul_rfi()
  27  kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);  in kvmppc_emul_rfi()
  32  vcpu->arch.regs.nip = vcpu->arch.dsrr0;  in kvmppc_emul_rfdi()
  33  kvmppc_set_msr(vcpu, vcpu->arch.dsrr1);  in kvmppc_emul_rfdi()
  38  vcpu->arch.regs.nip = vcpu->arch.csrr0;  in kvmppc_emul_rfci()
  39  kvmppc_set_msr(vcpu, vcpu->arch.csrr1);  in kvmppc_emul_rfci()
  80  kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);  in kvmppc_booke_emulate_op()
  90  vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)  in kvmppc_booke_emulate_op()
  96  vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)  in kvmppc_booke_emulate_op()
 127  vcpu->arch.shared->dar = spr_val;  in kvmppc_booke_emulate_mtspr()
  [all …]

book3s_hv_tm.c
  19  u64 msr = vcpu->arch.shregs.msr;  in emulate_tx_failure()
  21  tfiar = vcpu->arch.regs.nip & ~0x3ull;  in emulate_tx_failure()
  23  if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr))  in emulate_tx_failure()
  29  vcpu->arch.tfiar = tfiar;  in emulate_tx_failure()
  31  vcpu->arch.texasr = (vcpu->arch.texasr & 0x3ffffff) | texasr;  in emulate_tx_failure()
  44  u32 instr = vcpu->arch.emul_inst;  in kvmhv_p9_tm_emulation()
  45  u64 msr = vcpu->arch.shregs.msr;  in kvmhv_p9_tm_emulation()
  56  vcpu->arch.regs.nip -= 4;  in kvmhv_p9_tm_emulation()
  72  newmsr = vcpu->arch.shregs.srr1;  in kvmhv_p9_tm_emulation()
  78  vcpu->arch.shregs.msr = newmsr;  in kvmhv_p9_tm_emulation()
  [all …]

book3s_hv_p9_entry.c
  14  mtspr(SPRN_TAR, vcpu->arch.tar);  in load_spr_state()
  18  current->thread.vrsave != vcpu->arch.vrsave)  in load_spr_state()
  19  mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);  in load_spr_state()
  22  if (vcpu->arch.hfscr & HFSCR_EBB) {  in load_spr_state()
  23  if (current->thread.ebbhr != vcpu->arch.ebbhr)  in load_spr_state()
  24  mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);  in load_spr_state()
  25  if (current->thread.ebbrr != vcpu->arch.ebbrr)  in load_spr_state()
  26  mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);  in load_spr_state()
  27  if (current->thread.bescr != vcpu->arch.bescr)  in load_spr_state()
  28  mtspr(SPRN_BESCR, vcpu->arch.bescr);  in load_spr_state()
  [all …]

booke.c
  94  printk("pc: %08lx msr: %08llx\n", vcpu->arch.regs.nip,  in kvmppc_dump_vcpu()
  95  vcpu->arch.shared->msr);  in kvmppc_dump_vcpu()
  96  printk("lr: %08lx ctr: %08lx\n", vcpu->arch.regs.link,  in kvmppc_dump_vcpu()
  97  vcpu->arch.regs.ctr);  in kvmppc_dump_vcpu()
  98  printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,  in kvmppc_dump_vcpu()
  99  vcpu->arch.shared->srr1);  in kvmppc_dump_vcpu()
 101  printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);  in kvmppc_dump_vcpu()
 119  vcpu->arch.shadow_msr &= ~MSR_SPE;  in kvmppc_vcpu_disable_spe()
 129  vcpu->arch.shadow_msr |= MSR_SPE;  in kvmppc_vcpu_enable_spe()
 135  if (vcpu->arch.shared->msr & MSR_SPE) {  in kvmppc_vcpu_sync_spe()
  [all …]

emulate_loadstore.c
  85  vcpu->arch.mmio_vsx_copy_nums = 0;  in kvmppc_emulate_loadstore()
  86  vcpu->arch.mmio_vsx_offset = 0;  in kvmppc_emulate_loadstore()
  87  vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;  in kvmppc_emulate_loadstore()
  88  vcpu->arch.mmio_sp64_extend = 0;  in kvmppc_emulate_loadstore()
  89  vcpu->arch.mmio_sign_extend = 0;  in kvmppc_emulate_loadstore()
  90  vcpu->arch.mmio_vmx_copy_nums = 0;  in kvmppc_emulate_loadstore()
  91  vcpu->arch.mmio_vmx_offset = 0;  in kvmppc_emulate_loadstore()
  92  vcpu->arch.mmio_host_swabbed = 0;  in kvmppc_emulate_loadstore()
  95  vcpu->arch.regs.msr = kvmppc_get_msr(vcpu);  in kvmppc_emulate_loadstore()
  96  if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {  in kvmppc_emulate_loadstore()
  [all …]

timing.c
  27  mutex_lock(&vcpu->arch.exit_timing_lock);  in kvmppc_init_timing_stats()
  29  vcpu->arch.last_exit_type = 0xDEAD;  in kvmppc_init_timing_stats()
  31  vcpu->arch.timing_count_type[i] = 0;  in kvmppc_init_timing_stats()
  32  vcpu->arch.timing_max_duration[i] = 0;  in kvmppc_init_timing_stats()
  33  vcpu->arch.timing_min_duration[i] = 0xFFFFFFFF;  in kvmppc_init_timing_stats()
  34  vcpu->arch.timing_sum_duration[i] = 0;  in kvmppc_init_timing_stats()
  35  vcpu->arch.timing_sum_quad_duration[i] = 0;  in kvmppc_init_timing_stats()
  37  vcpu->arch.timing_last_exit = 0;  in kvmppc_init_timing_stats()
  38  vcpu->arch.timing_exit.tv64 = 0;  in kvmppc_init_timing_stats()
  39  vcpu->arch.timing_last_enter.tv64 = 0;  in kvmppc_init_timing_stats()
  [all …]

book3s_hv_p9_perf.c
  41  lp = vcpu->arch.vpa.pinned_addr;  in switch_pmu_to_guest()
  87  if (load_pmu || (vcpu->arch.hfscr & HFSCR_PM)) {  in switch_pmu_to_guest()
  88  mtspr(SPRN_PMC1, vcpu->arch.pmc[0]);  in switch_pmu_to_guest()
  89  mtspr(SPRN_PMC2, vcpu->arch.pmc[1]);  in switch_pmu_to_guest()
  90  mtspr(SPRN_PMC3, vcpu->arch.pmc[2]);  in switch_pmu_to_guest()
  91  mtspr(SPRN_PMC4, vcpu->arch.pmc[3]);  in switch_pmu_to_guest()
  92  mtspr(SPRN_PMC5, vcpu->arch.pmc[4]);  in switch_pmu_to_guest()
  93  mtspr(SPRN_PMC6, vcpu->arch.pmc[5]);  in switch_pmu_to_guest()
  94  mtspr(SPRN_MMCR1, vcpu->arch.mmcr[1]);  in switch_pmu_to_guest()
  95  mtspr(SPRN_MMCR2, vcpu->arch.mmcr[2]);  in switch_pmu_to_guest()
  [all …]

book3s_emulate.c
  73  if (vcpu->arch.papr_enabled && (level > PRIV_SUPER))  in spr_allowed()
  86  memcpy(&vcpu->arch.gpr_tm[0], &vcpu->arch.regs.gpr[0],  in kvmppc_copyto_vcpu_tm()
  87  sizeof(vcpu->arch.gpr_tm));  in kvmppc_copyto_vcpu_tm()
  88  memcpy(&vcpu->arch.fp_tm, &vcpu->arch.fp,  in kvmppc_copyto_vcpu_tm()
  90  memcpy(&vcpu->arch.vr_tm, &vcpu->arch.vr,  in kvmppc_copyto_vcpu_tm()
  92  vcpu->arch.ppr_tm = vcpu->arch.ppr;  in kvmppc_copyto_vcpu_tm()
  93  vcpu->arch.dscr_tm = vcpu->arch.dscr;  in kvmppc_copyto_vcpu_tm()
  94  vcpu->arch.amr_tm = vcpu->arch.amr;  in kvmppc_copyto_vcpu_tm()
  95  vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;  in kvmppc_copyto_vcpu_tm()
  96  vcpu->arch.tar_tm = vcpu->arch.tar;  in kvmppc_copyto_vcpu_tm()
  [all …]

book3s_hv_tm_builtin.c
  22  u32 instr = vcpu->arch.emul_inst;  in kvmhv_p9_tm_emulation_early()
  40  newmsr = vcpu->arch.shregs.srr1;  in kvmhv_p9_tm_emulation_early()
  45  vcpu->arch.shregs.msr = newmsr;  in kvmhv_p9_tm_emulation_early()
  46  vcpu->arch.cfar = vcpu->arch.regs.nip - 4;  in kvmhv_p9_tm_emulation_early()
  47  vcpu->arch.regs.nip = vcpu->arch.shregs.srr0;  in kvmhv_p9_tm_emulation_early()
  52  msr = vcpu->arch.shregs.msr;  in kvmhv_p9_tm_emulation_early()
  53  if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206))  in kvmhv_p9_tm_emulation_early()
  56  if (!(vcpu->arch.hfscr & HFSCR_EBB) ||  in kvmhv_p9_tm_emulation_early()
  68  vcpu->arch.shregs.msr = msr;  in kvmhv_p9_tm_emulation_early()
  69  vcpu->arch.cfar = vcpu->arch.regs.nip - 4;  in kvmhv_p9_tm_emulation_early()
  [all …]

e500_emulate.c
  53  ulong param = vcpu->arch.regs.gpr[rb];  in kvmppc_e500_emul_msgclr()
  59  clear_bit(prio, &vcpu->arch.pending_exceptions);  in kvmppc_e500_emul_msgclr()
  65  ulong param = vcpu->arch.regs.gpr[rb];  in kvmppc_e500_emul_msgsnd()
  75  int cpir = cvcpu->arch.shared->pir;  in kvmppc_e500_emul_msgsnd()
  77  set_bit(prio, &cvcpu->arch.pending_exceptions);  in kvmppc_e500_emul_msgsnd()
  94  vcpu->run->debug.arch.address = vcpu->arch.regs.nip;  in kvmppc_e500_emul_ehpriv()
  95  vcpu->run->debug.arch.status = 0;  in kvmppc_e500_emul_ehpriv()
 225  vcpu->arch.shared->mas0 = spr_val;  in kvmppc_core_emulate_mtspr_e500()
 228  vcpu->arch.shared->mas1 = spr_val;  in kvmppc_core_emulate_mtspr_e500()
 231  vcpu->arch.shared->mas2 = spr_val;  in kvmppc_core_emulate_mtspr_e500()
  [all …]

e500mc.c
 107  vcpu->arch.pid = pid;  in kvmppc_set_pid()
 124  mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr);  in kvmppc_core_vcpu_load_e500mc()
 126  mtspr(SPRN_MSRP, vcpu->arch.shadow_msrp);  in kvmppc_core_vcpu_load_e500mc()
 127  vcpu->arch.eplc = EPC_EGS | (get_lpid(vcpu) << EPC_ELPID_SHIFT);  in kvmppc_core_vcpu_load_e500mc()
 128  vcpu->arch.epsc = vcpu->arch.eplc;  in kvmppc_core_vcpu_load_e500mc()
 129  mtspr(SPRN_EPLC, vcpu->arch.eplc);  in kvmppc_core_vcpu_load_e500mc()
 130  mtspr(SPRN_EPSC, vcpu->arch.epsc);  in kvmppc_core_vcpu_load_e500mc()
 132  mtspr(SPRN_GIVPR, vcpu->arch.ivpr);  in kvmppc_core_vcpu_load_e500mc()
 133  mtspr(SPRN_GIVOR2, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]);  in kvmppc_core_vcpu_load_e500mc()
 134  mtspr(SPRN_GIVOR8, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]);  in kvmppc_core_vcpu_load_e500mc()
  [all …]

e500_mmu.c
  71  esel += gtlb0_set_base(vcpu_e500, vcpu->arch.shared->mas2);  in get_tlb_esel()
 134  tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1;  in kvmppc_e500_deliver_tlb_miss()
 136  tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f;  in kvmppc_e500_deliver_tlb_miss()
 138  vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)  in kvmppc_e500_deliver_tlb_miss()
 140  vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)  in kvmppc_e500_deliver_tlb_miss()
 143  vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN)  in kvmppc_e500_deliver_tlb_miss()
 144  | (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK);  in kvmppc_e500_deliver_tlb_miss()
 145  vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;  in kvmppc_e500_deliver_tlb_miss()
 146  vcpu->arch.shared->mas6 = (vcpu->arch.shared->mas6 & MAS6_SPID1)  in kvmppc_e500_deliver_tlb_miss()
 332  vcpu->arch.shared->mas0 &= ~MAS0_NV(~0);  in kvmppc_e500_emul_tlbre()
  [all …]

/linux/arch/mips/kvm/

emulate.c
  45  struct kvm_vcpu_arch *arch = &vcpu->arch;  in kvm_compute_return_epc() local
  65  arch->gprs[insn.r_format.rd] = epc + 8;  in kvm_compute_return_epc()
  68  nextpc = arch->gprs[insn.r_format.rs];  in kvm_compute_return_epc()
  84  if ((long)arch->gprs[insn.i_format.rs] < 0)  in kvm_compute_return_epc()
  93  if ((long)arch->gprs[insn.i_format.rs] >= 0)  in kvm_compute_return_epc()
 102  arch->gprs[31] = epc + 8;  in kvm_compute_return_epc()
 103  if ((long)arch->gprs[insn.i_format.rs] < 0)  in kvm_compute_return_epc()
 112  arch->gprs[31] = epc + 8;  in kvm_compute_return_epc()
 113  if ((long)arch->gprs[insn.i_format.rs] >= 0)  in kvm_compute_return_epc()
 141  arch->gprs[31] = instpc + 8;  in kvm_compute_return_epc()
  [all …]

mips.c
 115  return !!(vcpu->arch.pending_exceptions);  in kvm_arch_vcpu_runnable()
 151  kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();  in kvm_arch_init_vm()
 152  if (!kvm->arch.gpa_mm.pgd)  in kvm_arch_init_vm()
 166  pgd_free(NULL, kvm->arch.gpa_mm.pgd);  in kvm_mips_free_gpa_pt()
 263  vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);  in kvm_mips_comparecount_wakeup()
 267  vcpu->arch.wait = 0;  in kvm_mips_comparecount_wakeup()
 291  hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,  in kvm_arch_vcpu_create()
 293  vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;  in kvm_arch_vcpu_create()
 326  vcpu->arch.guest_ebase = gebase;  in kvm_arch_vcpu_create()
 353  vcpu->arch.vcpu_run = p;  in kvm_arch_vcpu_create()
  [all …]

/linux/tools/perf/util/

perf_regs.c
  33  const char *perf_reg_name(int id, const char *arch)  in perf_reg_name() argument
  37  if (!strcmp(arch, "csky"))  in perf_reg_name()
  39  else if (!strcmp(arch, "loongarch"))  in perf_reg_name()
  41  else if (!strcmp(arch, "mips"))  in perf_reg_name()
  43  else if (!strcmp(arch, "powerpc"))  in perf_reg_name()
  45  else if (!strcmp(arch, "riscv"))  in perf_reg_name()
  47  else if (!strcmp(arch, "s390"))  in perf_reg_name()
  49  else if (!strcmp(arch, "x86"))  in perf_reg_name()
  51  else if (!strcmp(arch, "arm"))  in perf_reg_name()
  53  else if (!strcmp(arch, "arm64"))  in perf_reg_name()
  [all …]

/linux/arch/s390/kvm/

guestdbg.c
  62  u64 *cr9 = &vcpu->arch.sie_block->gcr[9];  in enable_all_hw_bp()
  63  u64 *cr10 = &vcpu->arch.sie_block->gcr[10];  in enable_all_hw_bp()
  64  u64 *cr11 = &vcpu->arch.sie_block->gcr[11];  in enable_all_hw_bp()
  67  if (vcpu->arch.guestdbg.nr_hw_bp <= 0 ||  in enable_all_hw_bp()
  68  vcpu->arch.guestdbg.hw_bp_info == NULL)  in enable_all_hw_bp()
  79  for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) {  in enable_all_hw_bp()
  80  start = vcpu->arch.guestdbg.hw_bp_info[i].addr;  in enable_all_hw_bp()
  81  len = vcpu->arch.guestdbg.hw_bp_info[i].len;  in enable_all_hw_bp()
 102  u64 *cr9 = &vcpu->arch.sie_block->gcr[9];  in enable_all_hw_wp()
 103  u64 *cr10 = &vcpu->arch.sie_block->gcr[10];  in enable_all_hw_wp()
  [all …]

/linux/scripts/

head-object-list.txt
  14  arch/alpha/kernel/head.o
  15  arch/arc/kernel/head.o
  16  arch/arm/kernel/head-nommu.o
  17  arch/arm/kernel/head.o
  18  arch/csky/kernel/head.o
  19  arch/hexagon/kernel/head.o
  20  arch/loongarch/kernel/head.o
  21  arch/m68k/68000/head.o
  22  arch/m68k/coldfire/head.o
  23  arch/m68k/kernel/head.o
  [all …]

checkstack.pl
  42  my $arch = shift;
  43  if ($arch eq "") {
  44  $arch = `uname -m`;
  45  chomp($arch);
  56  if ($arch =~ '^(aarch|arm)64$') {
  61  } elsif ($arch eq 'arm') {
  65  } elsif ($arch =~ /^x86(_64)?$/ || $arch =~ /^i[3456]86$/) {
  71  } elsif ($arch eq 'm68k') {
  75  } elsif ($arch eq 'mips64') {
  78  } elsif ($arch eq 'mips') {
  [all …]

/linux/arch/loongarch/kvm/

vcpu.c
  39  context = this_cpu_ptr(vcpu->kvm->arch.vmcs);  in kvm_save_host_pmu()
  54  context = this_cpu_ptr(vcpu->kvm->arch.vmcs);  in kvm_restore_host_pmu()
  68  struct loongarch_csrs *csr = vcpu->arch.csr;  in kvm_save_guest_pmu()
  82  struct loongarch_csrs *csr = vcpu->arch.csr;  in kvm_restore_guest_pmu()
  98  if (!kvm_guest_has_pmu(&vcpu->arch))  in kvm_own_pmu()
 105  val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT;  in kvm_own_pmu()
 116  struct loongarch_csrs *csr = vcpu->arch.csr;  in kvm_lose_pmu()
 118  if (!(vcpu->arch.aux_inuse & KVM_LARCH_PMU))  in kvm_lose_pmu()
 136  vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU;  in kvm_lose_pmu()
 143  if ((vcpu->arch.aux_inuse & KVM_LARCH_PMU))  in kvm_restore_pmu()
  [all …]

/linux/tools/testing/selftests/kvm/x86_64/

debug_regs.c
 112  run->debug.arch.exception == BP_VECTOR &&  in main()
 113  run->debug.arch.pc == CAST_TO_RIP(sw_bp),  in main()
 115  run->exit_reason, run->debug.arch.exception,  in main()
 116  run->debug.arch.pc, CAST_TO_RIP(sw_bp));  in main()
 123  debug.arch.debugreg[i] = CAST_TO_RIP(hw_bp);  in main()
 124  debug.arch.debugreg[7] = 0x400 | (1UL << (2*i+1));  in main()
 129  run->debug.arch.exception == DB_VECTOR &&  in main()
 130  run->debug.arch.pc == CAST_TO_RIP(hw_bp) &&  in main()
 131  run->debug.arch.dr6 == target_dr6,  in main()
 134  i, run->exit_reason, run->debug.arch  in main()
  [all …]

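Note on the DR7 write at hit 124 of debug_regs.c above: the expression appears to arm hardware-breakpoint slot i by setting its global-enable bit (bit 2*i+1) together with DR7 bit 10, a reserved bit that reads as one. A minimal sketch of that computation follows; the helper name is hypothetical and not part of the selftest.

  #include <stdint.h>

  /* Hypothetical helper (illustration only): build the DR7 value used above --
   * reserved bit 10 plus the global-enable bit for hardware-breakpoint slot i. */
  static uint64_t dr7_enable_slot(int i)
  {
          return 0x400UL | (1UL << (2 * i + 1));
  }
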
/linux/arch/riscv/kvm/

vcpu.c
  51  struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;  in kvm_riscv_reset_vcpu()
  52  struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;  in kvm_riscv_reset_vcpu()
  53  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;  in kvm_riscv_reset_vcpu()
  54  struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;  in kvm_riscv_reset_vcpu()
  67  vcpu->arch.last_exit_cpu = -1;  in kvm_riscv_reset_vcpu()
  71  spin_lock(&vcpu->arch.reset_cntx_lock);  in kvm_riscv_reset_vcpu()
  73  spin_unlock(&vcpu->arch.reset_cntx_lock);  in kvm_riscv_reset_vcpu()
  83  bitmap_zero(vcpu->arch.irqs_pending, KVM_RISCV_VCPU_NR_IRQS);  in kvm_riscv_reset_vcpu()
  84  bitmap_zero(vcpu->arch.irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS);  in kvm_riscv_reset_vcpu()
  88  vcpu->arch.hfence_head = 0;  in kvm_riscv_reset_vcpu()
  [all …]

/linux/arch/x86/kernel/

machine_kexec_32.c
  45  free_pages((unsigned long)image->arch.pgd, PGD_ALLOCATION_ORDER);  in machine_kexec_free_page_tables()
  46  image->arch.pgd = NULL;  in machine_kexec_free_page_tables()
  48  free_page((unsigned long)image->arch.pmd0);  in machine_kexec_free_page_tables()
  49  image->arch.pmd0 = NULL;  in machine_kexec_free_page_tables()
  50  free_page((unsigned long)image->arch.pmd1);  in machine_kexec_free_page_tables()
  51  image->arch.pmd1 = NULL;  in machine_kexec_free_page_tables()
  53  free_page((unsigned long)image->arch.pte0);  in machine_kexec_free_page_tables()
  54  image->arch.pte0 = NULL;  in machine_kexec_free_page_tables()
  55  free_page((unsigned long)image->arch.pte1);  in machine_kexec_free_page_tables()
  56  image->arch.pte1 = NULL;  in machine_kexec_free_page_tables()
  [all …]

/linux/arch/arm64/kvm/

debug.c
  44  vcpu->arch.guest_debug_preserved.mdscr_el1 = val;  in save_guest_debug_regs()
  47  vcpu->arch.guest_debug_preserved.mdscr_el1);  in save_guest_debug_regs()
  49  vcpu->arch.guest_debug_preserved.pstate_ss =  in save_guest_debug_regs()
  55  u64 val = vcpu->arch.guest_debug_preserved.mdscr_el1;  in restore_guest_debug_regs()
  62  if (vcpu->arch.guest_debug_preserved.pstate_ss)  in restore_guest_debug_regs()
 102  vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;  in kvm_arm_setup_mdcr_el2()
 103  vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |  in kvm_arm_setup_mdcr_el2()
 113  vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;  in kvm_arm_setup_mdcr_el2()
 125  vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;  in kvm_arm_setup_mdcr_el2()
 127  trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);  in kvm_arm_setup_mdcr_el2()
  [all …]

/linux/tools/perf/trace/beauty/

arch_errno_names.sh
  20  arch="$1"
  22  header="$toolsdir/arch/$arch/include/uapi/asm/errno.h"
  32  arch=$(arch_string "$1")
  34  printf "static const char *errno_to_name__%s(int err)\n{\n\tswitch (err) {\n" $arch
  45  arch="$1"
  46  asm_errno=$(asm_errno_file "$arch")
  52  |IFS=, create_errno_lookup_func "$arch"
  60  printf 'arch_syscalls__strerrno_t *arch_syscalls__strerrno_function(const char *arch)\n'
  62  for arch i
  [all …]

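Note on the printf at hit 34 of arch_errno_names.sh above: the script appears to emit one errno-to-name lookup function per architecture. A minimal sketch of that generated shape follows; the case values and the default branch are assumptions for illustration, not the script's literal output.

  /* Illustrative sketch of a generated per-arch lookup (errno values shown
   * are the common x86 ones; the real table is produced from errno.h). */
  static const char *errno_to_name__x86(int err)
  {
          switch (err) {
          case 1: return "EPERM";
          case 2: return "ENOENT";
          default: return "(unknown)";
          }
  }
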
/linux/arch/riscv/kernel/

module-sections.c
  15  struct mod_section *got_sec = &mod->arch.got;  in module_emit_got_entry()
  34  struct mod_section *got_plt_sec = &mod->arch.got_plt;  in module_emit_plt_entry()
  36  struct mod_section *plt_sec = &mod->arch.plt;  in module_emit_plt_entry()
 102  mod->arch.plt.shdr = sechdrs + i;  in module_frob_arch_sections()
 104  mod->arch.got.shdr = sechdrs + i;  in module_frob_arch_sections()
 106  mod->arch.got_plt.shdr = sechdrs + i;  in module_frob_arch_sections()
 109  if (!mod->arch.plt.shdr) {  in module_frob_arch_sections()
 113  if (!mod->arch.got.shdr) {  in module_frob_arch_sections()
 117  if (!mod->arch.got_plt.shdr) {  in module_frob_arch_sections()
 138  mod->arch.plt.shdr->sh_type = SHT_NOBITS;  in module_frob_arch_sections()
  [all …]