// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/err.h>
#include <linux/errno.h>
#include <asm/kvm_csr.h>
#include <asm/kvm_vcpu.h>

static unsigned int priority_to_irq[EXCCODE_INT_NUM] = {
	[INT_TI]   = CPU_TIMER,
	[INT_IPI]  = CPU_IPI,
	[INT_SWI0] = CPU_SIP0,
	[INT_SWI1] = CPU_SIP1,
	[INT_HWI0] = CPU_IP0,
	[INT_HWI1] = CPU_IP1,
	[INT_HWI2] = CPU_IP2,
	[INT_HWI3] = CPU_IP3,
	[INT_HWI4] = CPU_IP4,
	[INT_HWI5] = CPU_IP5,
	[INT_HWI6] = CPU_IP6,
	[INT_HWI7] = CPU_IP7,
};

static int kvm_irq_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
{
	unsigned int irq = 0;

	clear_bit(priority, &vcpu->arch.irq_pending);
	if (priority < EXCCODE_INT_NUM)
		irq = priority_to_irq[priority];

	switch (priority) {
	case INT_TI:
	case INT_IPI:
	case INT_SWI0:
	case INT_SWI1:
		set_gcsr_estat(irq);
		break;

	case INT_HWI0 ... INT_HWI7:
		set_csr_gintc(irq);
		break;

	default:
		break;
	}

	return 1;
}

static int kvm_irq_clear(struct kvm_vcpu *vcpu, unsigned int priority)
{
	unsigned int irq = 0;

	clear_bit(priority, &vcpu->arch.irq_clear);
	if (priority < EXCCODE_INT_NUM)
		irq = priority_to_irq[priority];

	switch (priority) {
	case INT_TI:
	case INT_IPI:
	case INT_SWI0:
	case INT_SWI1:
		clear_gcsr_estat(irq);
		break;

	case INT_HWI0 ... INT_HWI7:
		clear_csr_gintc(irq);
		break;

	default:
		break;
	}

	return 1;
}

void kvm_deliver_intr(struct kvm_vcpu *vcpu)
{
	unsigned int priority;
	unsigned long *pending = &vcpu->arch.irq_pending;
	unsigned long *pending_clr = &vcpu->arch.irq_clear;

	for_each_set_bit(priority, pending_clr, INT_IPI + 1)
		kvm_irq_clear(vcpu, priority);

	for_each_set_bit(priority, pending, INT_IPI + 1)
		kvm_irq_deliver(vcpu, priority);
}

int kvm_pending_timer(struct kvm_vcpu *vcpu)
{
	return test_bit(INT_TI, &vcpu->arch.irq_pending);
}

/*
 * Only the illegal instruction and address error exceptions are injected
 * here; other exceptions are injected by hardware in KVM mode.
 */
static void _kvm_deliver_exception(struct kvm_vcpu *vcpu,
				unsigned int code, unsigned int subcode)
{
	unsigned long val, vec_size;

	/*
	 * BADV is set for the EXCCODE_ADE exception:
	 * use the PC register (GVA address) for an instruction exception,
	 * else use BADV from the host side (GPA address) for a data exception.
	 */
	if (code == EXCCODE_ADE) {
		if (subcode == EXSUBCODE_ADEF)
			val = vcpu->arch.pc;
		else
			val = vcpu->arch.badv;
		kvm_write_hw_gcsr(LOONGARCH_CSR_BADV, val);
	}

	/* Set exception instruction */
	kvm_write_hw_gcsr(LOONGARCH_CSR_BADI, vcpu->arch.badi);

	/*
	 * Save CRMD in PRMD,
	 * then switch CRMD to PLV0 with interrupts disabled
	 */
	val = kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD);
	kvm_write_hw_gcsr(LOONGARCH_CSR_PRMD, val);
	val = val & ~(CSR_CRMD_PLV | CSR_CRMD_IE);
	kvm_write_hw_gcsr(LOONGARCH_CSR_CRMD, val);

	/* Set exception PC address */
	kvm_write_hw_gcsr(LOONGARCH_CSR_ERA, vcpu->arch.pc);

	/*
	 * Set exception code.
	 * An exception and an interrupt can be injected at the same time;
	 * hardware handles the exception first and then the external interrupt.
	 * The exception code is Ecode in ESTAT[21:16],
	 * the interrupt status is in ESTAT[12:0].
	 */
	val = kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT);
	val = (val & ~CSR_ESTAT_EXC) | (code << CSR_ESTAT_EXC_SHIFT);
	kvm_write_hw_gcsr(LOONGARCH_CSR_ESTAT, val);

	/*
	 * Calculate the exception entry address:
	 * with vectored mode enabled (ECFG.VS != 0), each vector spans
	 * (1 << VS) * 4 bytes and the exception's Ecode is the vector index.
	 */
	val = kvm_read_hw_gcsr(LOONGARCH_CSR_ECFG);
	vec_size = (val & CSR_ECFG_VS) >> CSR_ECFG_VS_SHIFT;
	if (vec_size)
		vec_size = (1 << vec_size) * 4;
	val = kvm_read_hw_gcsr(LOONGARCH_CSR_EENTRY);
	vcpu->arch.pc = val + code * vec_size;
}

void kvm_deliver_exception(struct kvm_vcpu *vcpu)
{
	unsigned int code;
	unsigned long *pending = &vcpu->arch.exception_pending;

	if (*pending) {
		code = __ffs(*pending);
		_kvm_deliver_exception(vcpu, code, vcpu->arch.esubcode);
		*pending = 0;
		vcpu->arch.esubcode = 0;
	}
}
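
/*
 * Illustrative sketch only, not part of the upstream file: one plausible way
 * a caller could raise the guest timer interrupt so that kvm_deliver_intr()
 * injects it on the next VM entry. It assumes the kvm_queue_irq() helper from
 * asm/kvm_vcpu.h, which sets the corresponding bit in vcpu->arch.irq_pending,
 * and the generic kvm_vcpu_kick() from linux/kvm_host.h; the function name is
 * hypothetical. Kept under #if 0 so it is not compiled into the file.
 */
#if 0
static void example_raise_guest_timer(struct kvm_vcpu *vcpu)
{
	/* Mark INT_TI pending; kvm_irq_deliver() maps it to CPU_TIMER in ESTAT */
	kvm_queue_irq(vcpu, INT_TI);

	/* Nudge a running vCPU so it re-enters the guest and sees the IRQ */
	kvm_vcpu_kick(vcpu);
}
#endif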