/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/kvm_host.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>

#include <asm/time.h>
#include <asm-generic/div64.h>

#include "timing.h"

void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu)
{
	int i;

	/* Take a lock to avoid concurrent updates */
	mutex_lock(&vcpu->arch.exit_timing_lock);

	vcpu->arch.last_exit_type = 0xDEAD;
	for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) {
		vcpu->arch.timing_count_type[i] = 0;
		vcpu->arch.timing_max_duration[i] = 0;
		vcpu->arch.timing_min_duration[i] = 0xFFFFFFFF;
		vcpu->arch.timing_sum_duration[i] = 0;
		vcpu->arch.timing_sum_quad_duration[i] = 0;
	}
	vcpu->arch.timing_last_exit = 0;
	vcpu->arch.timing_exit.tv64 = 0;
	vcpu->arch.timing_last_enter.tv64 = 0;

	mutex_unlock(&vcpu->arch.exit_timing_lock);
}

static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type)
{
	u64 old;

	mutex_lock(&vcpu->arch.exit_timing_lock);

	vcpu->arch.timing_count_type[type]++;

	/* sum */
	old = vcpu->arch.timing_sum_duration[type];
	vcpu->arch.timing_sum_duration[type] += duration;
	if (unlikely(old > vcpu->arch.timing_sum_duration[type])) {
		printk(KERN_ERR"%s - wrap adding sum of durations"
			" old %lld new %lld type %d exit # of type %d\n",
			__func__, old, vcpu->arch.timing_sum_duration[type],
			type, vcpu->arch.timing_count_type[type]);
	}

	/* square sum */
	old = vcpu->arch.timing_sum_quad_duration[type];
	vcpu->arch.timing_sum_quad_duration[type] += (duration*duration);
	if (unlikely(old > vcpu->arch.timing_sum_quad_duration[type])) {
		printk(KERN_ERR"%s - wrap adding sum of squared durations"
			" old %lld new %lld type %d exit # of type %d\n",
			__func__, old,
			vcpu->arch.timing_sum_quad_duration[type],
			type, vcpu->arch.timing_count_type[type]);
	}

	/* set min/max */
	if (unlikely(duration < vcpu->arch.timing_min_duration[type]))
		vcpu->arch.timing_min_duration[type] = duration;
	if (unlikely(duration > vcpu->arch.timing_max_duration[type]))
		vcpu->arch.timing_max_duration[type] = duration;

	mutex_unlock(&vcpu->arch.exit_timing_lock);
}

void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu)
{
	u64 exit = vcpu->arch.timing_last_exit;
	u64 enter = vcpu->arch.timing_last_enter.tv64;

	/* save exit time, used next exit when the reenter time is known */
	vcpu->arch.timing_last_exit = vcpu->arch.timing_exit.tv64;

	if (unlikely(vcpu->arch.last_exit_type == 0xDEAD || exit == 0))
		return; /* skip incomplete cycle (e.g. after reset) */

	/* update statistics for average and standard deviation */
	add_exit_timing(vcpu, (enter - exit), vcpu->arch.last_exit_type);
	/* enter -> timing_last_exit is time spent in guest - log this too */
	add_exit_timing(vcpu, (vcpu->arch.timing_last_exit - enter),
			TIMEINGUEST);
}
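/*
 * A sketch of how the counters above support the "average and standard
 * deviation" mentioned in kvmppc_update_timing_stats(): with n standing
 * for timing_count_type[type], a consumer of the raw sums (in timebase
 * ticks) can derive, per exit type,
 *
 *   mean   = sum / n
 *   stddev = sqrt(sum_squared / n - mean * mean)
 *
 * which is why both the plain and the squared durations are accumulated.
 */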
static const char *kvm_exit_names[__NUMBER_OF_KVM_EXIT_TYPES] = {
	[MMIO_EXITS] = "MMIO",
	[SIGNAL_EXITS] = "SIGNAL",
	[ITLB_REAL_MISS_EXITS] = "ITLBREAL",
	[ITLB_VIRT_MISS_EXITS] = "ITLBVIRT",
	[DTLB_REAL_MISS_EXITS] = "DTLBREAL",
	[DTLB_VIRT_MISS_EXITS] = "DTLBVIRT",
	[SYSCALL_EXITS] = "SYSCALL",
	[ISI_EXITS] = "ISI",
	[DSI_EXITS] = "DSI",
	[EMULATED_INST_EXITS] = "EMULINST",
	[EMULATED_MTMSRWE_EXITS] = "EMUL_WAIT",
	[EMULATED_WRTEE_EXITS] = "EMUL_WRTEE",
	[EMULATED_MTSPR_EXITS] = "EMUL_MTSPR",
	[EMULATED_MFSPR_EXITS] = "EMUL_MFSPR",
	[EMULATED_MTMSR_EXITS] = "EMUL_MTMSR",
	[EMULATED_MFMSR_EXITS] = "EMUL_MFMSR",
	[EMULATED_TLBSX_EXITS] = "EMUL_TLBSX",
	[EMULATED_TLBWE_EXITS] = "EMUL_TLBWE",
	[EMULATED_RFI_EXITS] = "EMUL_RFI",
	[DEC_EXITS] = "DEC",
	[EXT_INTR_EXITS] = "EXTINT",
	[HALT_WAKEUP] = "HALT",
	[USR_PR_INST] = "USR_PR_INST",
	[FP_UNAVAIL] = "FP_UNAVAIL",
	[DEBUG_EXITS] = "DEBUG",
	[TIMEINGUEST] = "TIMEINGUEST"
};

static int kvmppc_exit_timing_show(struct seq_file *m, void *private)
{
	struct kvm_vcpu *vcpu = m->private;
	int i;
	u64 min, max, sum, sum_quad;

	seq_puts(m, "type count min max sum sum_squared\n");

	for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) {

		min = vcpu->arch.timing_min_duration[i];
		do_div(min, tb_ticks_per_usec);
		max = vcpu->arch.timing_max_duration[i];
		do_div(max, tb_ticks_per_usec);
		sum = vcpu->arch.timing_sum_duration[i];
		do_div(sum, tb_ticks_per_usec);
		sum_quad = vcpu->arch.timing_sum_quad_duration[i];
		do_div(sum_quad, tb_ticks_per_usec);

		seq_printf(m, "%12s %10d %10lld %10lld %20lld %20lld\n",
			kvm_exit_names[i],
			vcpu->arch.timing_count_type[i],
			min,
			max,
			sum,
			sum_quad);

	}
	return 0;
}

/* Write 'c' to clear the timing statistics. */
static ssize_t kvmppc_exit_timing_write(struct file *file,
					const char __user *user_buf,
					size_t count, loff_t *ppos)
{
	int err = -EINVAL;
	char c;

	if (count > 1) {
		goto done;
	}

	if (get_user(c, user_buf)) {
		err = -EFAULT;
		goto done;
	}

	if (c == 'c') {
		struct seq_file *seqf = file->private_data;
		struct kvm_vcpu *vcpu = seqf->private;
		/* Write does not affect our buffers previously generated with
		 * show. seq_file is locked here to prevent races of init with
		 * a show call */
		mutex_lock(&seqf->lock);
		kvmppc_init_timing_stats(vcpu);
		mutex_unlock(&seqf->lock);
		err = count;
	}

done:
	return err;
}
static int kvmppc_exit_timing_open(struct inode *inode, struct file *file)
{
	return single_open(file, kvmppc_exit_timing_show, inode->i_private);
}

static const struct file_operations kvmppc_exit_timing_fops = {
	.owner = THIS_MODULE,
	.open = kvmppc_exit_timing_open,
	.read = seq_read,
	.write = kvmppc_exit_timing_write,
	.llseek = seq_lseek,
	.release = single_release,
};

void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu, unsigned int id)
{
	static char dbg_fname[50];
	struct dentry *debugfs_file;

	snprintf(dbg_fname, sizeof(dbg_fname), "vm%u_vcpu%u_timing",
		 current->pid, id);
	debugfs_file = debugfs_create_file(dbg_fname, 0666,
					   kvm_debugfs_dir, vcpu,
					   &kvmppc_exit_timing_fops);

	if (!debugfs_file) {
		printk(KERN_ERR"%s: error creating debugfs file %s\n",
		       __func__, dbg_fname);
		return;
	}

	vcpu->arch.debugfs_exit_timing = debugfs_file;
}

void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.debugfs_exit_timing) {
		debugfs_remove(vcpu->arch.debugfs_exit_timing);
		vcpu->arch.debugfs_exit_timing = NULL;
	}
}
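/*
 * Usage sketch, assuming debugfs is mounted at its usual location
 * (/sys/kernel/debug), so kvm_debugfs_dir resolves to /sys/kernel/debug/kvm;
 * the file name follows the snprintf() in kvmppc_create_vcpu_debugfs():
 *
 *   cat /sys/kernel/debug/kvm/vm<pid>_vcpu<id>_timing          # dump stats
 *   echo -n c > /sys/kernel/debug/kvm/vm<pid>_vcpu<id>_timing  # clear stats
 *
 * The write must be a single byte: anything longer (e.g. "c\n" from a plain
 * echo) is rejected with -EINVAL by kvmppc_exit_timing_write().
 */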