/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/kvm_host.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>

#include <asm/time.h>
#include <asm-generic/div64.h>

#include "timing.h"

void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu)
{
        int i;

        /* pause guest execution to avoid concurrent updates */
        mutex_lock(&vcpu->mutex);

        vcpu->arch.last_exit_type = 0xDEAD;
        for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) {
                vcpu->arch.timing_count_type[i] = 0;
                vcpu->arch.timing_max_duration[i] = 0;
                vcpu->arch.timing_min_duration[i] = 0xFFFFFFFF;
                vcpu->arch.timing_sum_duration[i] = 0;
                vcpu->arch.timing_sum_quad_duration[i] = 0;
        }
        vcpu->arch.timing_last_exit = 0;
        vcpu->arch.timing_exit.tv64 = 0;
        vcpu->arch.timing_last_enter.tv64 = 0;

        mutex_unlock(&vcpu->mutex);
}

static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type)
{
        u64 old;

        do_div(duration, tb_ticks_per_usec);
        if (unlikely(duration > 0xFFFFFFFF)) {
                printk(KERN_ERR"%s - duration too big -> overflow"
                        " duration %lld type %d exit #%d\n",
                        __func__, duration, type,
                        vcpu->arch.timing_count_type[type]);
                return;
        }

        vcpu->arch.timing_count_type[type]++;

        /* sum */
        old = vcpu->arch.timing_sum_duration[type];
        vcpu->arch.timing_sum_duration[type] += duration;
        if (unlikely(old > vcpu->arch.timing_sum_duration[type])) {
                printk(KERN_ERR"%s - wrap adding sum of durations"
                        " old %lld new %lld type %d exit # of type %d\n",
                        __func__, old, vcpu->arch.timing_sum_duration[type],
                        type, vcpu->arch.timing_count_type[type]);
        }

        /* square sum */
        old = vcpu->arch.timing_sum_quad_duration[type];
        vcpu->arch.timing_sum_quad_duration[type] += (duration*duration);
        if (unlikely(old > vcpu->arch.timing_sum_quad_duration[type])) {
                printk(KERN_ERR"%s - wrap adding sum of squared durations"
                        " old %lld new %lld type %d exit # of type %d\n",
                        __func__, old,
                        vcpu->arch.timing_sum_quad_duration[type],
                        type, vcpu->arch.timing_count_type[type]);
        }

        /* set min/max */
        if (unlikely(duration < vcpu->arch.timing_min_duration[type]))
                vcpu->arch.timing_min_duration[type] = duration;
        if (unlikely(duration > vcpu->arch.timing_max_duration[type]))
                vcpu->arch.timing_max_duration[type] = duration;
}
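
/*
 * The per-type counters accumulated above are enough to recover the mean
 * and the standard deviation of each exit type offline, for example:
 *
 *   mean     = timing_sum_duration[type] / timing_count_type[type]
 *   variance = timing_sum_quad_duration[type] / timing_count_type[type]
 *              - mean * mean
 *
 * All durations are scaled to microseconds by the do_div() on
 * tb_ticks_per_usec before being accumulated.
 */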

void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu)
{
        u64 exit = vcpu->arch.timing_last_exit;
        u64 enter = vcpu->arch.timing_last_enter.tv64;

        /* save exit time, used next exit when the reenter time is known */
        vcpu->arch.timing_last_exit = vcpu->arch.timing_exit.tv64;

        if (unlikely(vcpu->arch.last_exit_type == 0xDEAD || exit == 0))
                return; /* skip incomplete cycle (e.g. after reset) */

        /* update statistics for average and standard deviation */
        add_exit_timing(vcpu, (enter - exit), vcpu->arch.last_exit_type);
        /* enter -> timing_last_exit is time spent in guest - log this too */
        add_exit_timing(vcpu, (vcpu->arch.timing_last_exit - enter),
                        TIMEINGUEST);
}

static const char *kvm_exit_names[__NUMBER_OF_KVM_EXIT_TYPES] = {
        [MMIO_EXITS] =              "MMIO",
        [DCR_EXITS] =               "DCR",
        [SIGNAL_EXITS] =            "SIGNAL",
        [ITLB_REAL_MISS_EXITS] =    "ITLBREAL",
        [ITLB_VIRT_MISS_EXITS] =    "ITLBVIRT",
        [DTLB_REAL_MISS_EXITS] =    "DTLBREAL",
        [DTLB_VIRT_MISS_EXITS] =    "DTLBVIRT",
        [SYSCALL_EXITS] =           "SYSCALL",
        [ISI_EXITS] =               "ISI",
        [DSI_EXITS] =               "DSI",
        [EMULATED_INST_EXITS] =     "EMULINST",
        [EMULATED_MTMSRWE_EXITS] =  "EMUL_WAIT",
        [EMULATED_WRTEE_EXITS] =    "EMUL_WRTEE",
        [EMULATED_MTSPR_EXITS] =    "EMUL_MTSPR",
        [EMULATED_MFSPR_EXITS] =    "EMUL_MFSPR",
        [EMULATED_MTMSR_EXITS] =    "EMUL_MTMSR",
        [EMULATED_MFMSR_EXITS] =    "EMUL_MFMSR",
        [EMULATED_TLBSX_EXITS] =    "EMUL_TLBSX",
        [EMULATED_TLBWE_EXITS] =    "EMUL_TLBWE",
        [EMULATED_RFI_EXITS] =      "EMUL_RFI",
        [DEC_EXITS] =               "DEC",
        [EXT_INTR_EXITS] =          "EXTINT",
        [HALT_WAKEUP] =             "HALT",
        [USR_PR_INST] =             "USR_PR_INST",
        [FP_UNAVAIL] =              "FP_UNAVAIL",
        [DEBUG_EXITS] =             "DEBUG",
        [TIMEINGUEST] =             "TIMEINGUEST"
};

static int kvmppc_exit_timing_show(struct seq_file *m, void *private)
{
        struct kvm_vcpu *vcpu = m->private;
        int i;

        seq_printf(m, "%s", "type\tcount\tmin\tmax\tsum\tsum_squared\n");

        for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) {
                seq_printf(m, "%12s\t%10d\t%10lld\t%10lld\t%20lld\t%20lld\n",
                        kvm_exit_names[i],
                        vcpu->arch.timing_count_type[i],
                        vcpu->arch.timing_min_duration[i],
                        vcpu->arch.timing_max_duration[i],
                        vcpu->arch.timing_sum_duration[i],
                        vcpu->arch.timing_sum_quad_duration[i]);
        }
        return 0;
}

/* Write 'c' to clear the timing statistics. */
static ssize_t kvmppc_exit_timing_write(struct file *file,
                                        const char __user *user_buf,
                                        size_t count, loff_t *ppos)
{
        int err = -EINVAL;
        char c;

        if (count > 1) {
                goto done;
        }

        if (get_user(c, user_buf)) {
                err = -EFAULT;
                goto done;
        }

        if (c == 'c') {
                struct seq_file *seqf = file->private_data;
                struct kvm_vcpu *vcpu = seqf->private;
                /* Write does not affect our buffers previously generated with
                 * show. seq_file is locked here to prevent races of init with
                 * a show call */
                mutex_lock(&seqf->lock);
                kvmppc_init_timing_stats(vcpu);
                mutex_unlock(&seqf->lock);
                err = count;
        }

done:
        return err;
}
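
/*
 * With debugfs mounted at its usual location, the per-vcpu file created
 * below can be inspected and reset from userspace roughly like this (the
 * name depends on the pid of the creating task and on the vcpu id, see
 * kvmppc_create_vcpu_debugfs()):
 *
 *   cat /sys/kernel/debug/kvm/vm<pid>_vcpu<id>_timing
 *   echo -n c > /sys/kernel/debug/kvm/vm<pid>_vcpu<id>_timing
 *
 * "echo -n" matters: the write handler above rejects anything longer than
 * one character, so a trailing newline returns -EINVAL.
 */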

static int kvmppc_exit_timing_open(struct inode *inode, struct file *file)
{
        return single_open(file, kvmppc_exit_timing_show, inode->i_private);
}

static const struct file_operations kvmppc_exit_timing_fops = {
        .owner   = THIS_MODULE,
        .open    = kvmppc_exit_timing_open,
        .read    = seq_read,
        .write   = kvmppc_exit_timing_write,
        .llseek  = seq_lseek,
        .release = single_release,
};

void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu, unsigned int id)
{
        static char dbg_fname[50];
        struct dentry *debugfs_file;

        snprintf(dbg_fname, sizeof(dbg_fname), "vm%u_vcpu%u_timing",
                 current->pid, id);
        debugfs_file = debugfs_create_file(dbg_fname, 0666,
                                        kvm_debugfs_dir, vcpu,
                                        &kvmppc_exit_timing_fops);

        if (!debugfs_file) {
                printk(KERN_ERR"%s: error creating debugfs file %s\n",
                        __func__, dbg_fname);
                return;
        }

        vcpu->arch.debugfs_exit_timing = debugfs_file;
}

void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.debugfs_exit_timing) {
                debugfs_remove(vcpu->arch.debugfs_exit_timing);
                vcpu->arch.debugfs_exit_timing = NULL;
        }
}