// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * nVHE hyp save/restore of the host's self-hosted debug state around
 * guest entry/exit: SPE profiling (PMSCR_EL1), trace filtering
 * (TRFCR_EL1, with a TRBE drain when needed) and BRBE branch records
 * (BRBCR_EL1) are paused before running a guest and re-enabled on the
 * way back to the host.
 */

#include <hyp/debug-sr.h>

#include <linux/compiler.h>
#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

/*
 * Stop host SPE data generation before entering the guest.
 *
 * On return, *@pmscr_el1 holds the host's PMSCR_EL1 value, or 0 if SPE
 * was not in use — which also makes __debug_restore_spe() a no-op.
 */
static void __debug_save_spe(u64 *pmscr_el1)
{
	u64 reg;

	/* Clear pmscr in case of early return */
	*pmscr_el1 = 0;

	/*
	 * At this point, we know that this CPU implements
	 * SPE and is available to the host.
	 * Check if the host is actually using it ?
	 */
	reg = read_sysreg_s(SYS_PMBLIMITR_EL1);
	if (!(reg & BIT(PMBLIMITR_EL1_E_SHIFT)))
		return;

	/* Yes; save the control register and disable data generation */
	*pmscr_el1 = read_sysreg_el1(SYS_PMSCR);
	write_sysreg_el1(0, SYS_PMSCR);
	/* Make the PMSCR_EL1 write visible before draining */
	isb();

	/* Now drain all buffered data to memory */
	psb_csync();
}

/* Re-enable host SPE data generation, if it was active on guest entry. */
static void __debug_restore_spe(u64 pmscr_el1)
{
	/* 0 means SPE was not in use when we saved — nothing to restore */
	if (!pmscr_el1)
		return;

	/* The host page table is installed, but not yet synchronised */
	isb();

	/* Re-enable data generation */
	write_sysreg_el1(pmscr_el1, SYS_PMSCR);
}

/* Swap TRFCR_EL1: stash the current value and install @new_trfcr. */
static void __trace_do_switch(u64 *saved_trfcr, u64 new_trfcr)
{
	*saved_trfcr = read_sysreg_el1(SYS_TRFCR);
	write_sysreg_el1(new_trfcr, SYS_TRFCR);
}

/*
 * Does the trace buffer need draining before the TRFCR switch takes
 * effect?
 *
 * NOTE(review): with protected KVM the TRBE enable bit is read straight
 * from TRBLIMITR_EL1 rather than relying on the TRBE_ENABLED flag —
 * presumably because host-maintained flags aren't trusted in protected
 * mode; confirm against where TRBE_ENABLED is set.
 */
static bool __trace_needs_drain(void)
{
	if (is_protected_kvm_enabled() && host_data_test_flag(HAS_TRBE))
		return read_sysreg_s(SYS_TRBLIMITR_EL1) & TRBLIMITR_EL1_E;

	return host_data_test_flag(TRBE_ENABLED);
}

/*
 * TRFCR must be switched if TRBE is enabled or the host has an EL1
 * trace filter configuration in place.
 */
static bool __trace_needs_switch(void)
{
	return host_data_test_flag(TRBE_ENABLED) ||
	       host_data_test_flag(EL1_TRACING_CONFIGURED);
}

/*
 * Install the guest TRFCR value (saving the host's), and drain any
 * trace data generated with the host configuration.
 */
static void __trace_switch_to_guest(void)
{
	/*
	 * Unsupported with TRBE so disable: force a zero (disabled)
	 * TRFCR for the guest while the trace buffer is in use.
	 */
	if (host_data_test_flag(TRBE_ENABLED))
		*host_data_ptr(trfcr_while_in_guest) = 0;

	__trace_do_switch(host_data_ptr(host_debug_state.trfcr_el1),
			  *host_data_ptr(trfcr_while_in_guest));

	if (__trace_needs_drain()) {
		/* Synchronise the TRFCR write, then drain the buffer */
		isb();
		tsb_csync();
	}
}

/* Restore the host's TRFCR_EL1, saving the value used while in guest. */
static void __trace_switch_to_host(void)
{
	__trace_do_switch(host_data_ptr(trfcr_while_in_guest),
			  *host_data_ptr(host_debug_state.trfcr_el1));
}

/*
 * Stop BRBE branch record generation before entering the guest.
 *
 * On return, *@brbcr_el1 holds the host's BRBCR_EL1 value, or 0 if
 * BRBE was not enabled — which makes __debug_restore_brbe() a no-op.
 */
static void __debug_save_brbe(u64 *brbcr_el1)
{
	/* Clear in case of early return */
	*brbcr_el1 = 0;

	/* Check if the BRBE is enabled */
	if (!(read_sysreg_el1(SYS_BRBCR) & (BRBCR_ELx_E0BRE | BRBCR_ELx_ExBRE)))
		return;

	/*
	 * Prohibit branch record generation while we are in guest.
	 * Since access to BRBCR_EL1 is trapped, the guest can't
	 * modify the filtering set by the host.
	 */
	*brbcr_el1 = read_sysreg_el1(SYS_BRBCR);
	write_sysreg_el1(0, SYS_BRBCR);
}

/* Re-enable host BRBE branch record generation, if previously active. */
static void __debug_restore_brbe(u64 brbcr_el1)
{
	/* 0 means BRBE was not enabled when we saved — nothing to do */
	if (!brbcr_el1)
		return;

	/* Restore BRBE controls */
	write_sysreg_el1(brbcr_el1, SYS_BRBCR);
}

/*
 * Pause all host debug/trace buffer writers (SPE, BRBE, TRBE/TRF)
 * before entering the guest. Counterpart of
 * __debug_restore_host_buffers_nvhe().
 */
void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu)
{
	/* Disable and flush SPE data generation */
	if (host_data_test_flag(HAS_SPE))
		__debug_save_spe(host_data_ptr(host_debug_state.pmscr_el1));

	/* Disable BRBE branch records */
	if (host_data_test_flag(HAS_BRBE))
		__debug_save_brbe(host_data_ptr(host_debug_state.brbcr_el1));

	if (__trace_needs_switch())
		__trace_switch_to_guest();
}

/* Switch the common (non-buffer) debug state to the guest's. */
void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
{
	__debug_switch_to_guest_common(vcpu);
}

/*
 * Undo __debug_save_host_buffers_nvhe() on the way back to the host:
 * each restore helper is a no-op if the corresponding facility was not
 * active at save time.
 */
void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu)
{
	if (host_data_test_flag(HAS_SPE))
		__debug_restore_spe(*host_data_ptr(host_debug_state.pmscr_el1));
	if (host_data_test_flag(HAS_BRBE))
		__debug_restore_brbe(*host_data_ptr(host_debug_state.brbcr_el1));
	if (__trace_needs_switch())
		__trace_switch_to_host();
}

/* Switch the common (non-buffer) debug state back to the host's. */
void __debug_switch_to_host(struct kvm_vcpu *vcpu)
{
	__debug_switch_to_host_common(vcpu);
}