/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/kvm_host.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>

#include <asm/time.h>
#include <asm-generic/div64.h>

#include "timing.h"

void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu)
{
	int i;

	/* Take a lock to avoid concurrent updates */
	mutex_lock(&vcpu->arch.exit_timing_lock);

	vcpu->arch.last_exit_type = 0xDEAD;
	for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) {
		vcpu->arch.timing_count_type[i] = 0;
		vcpu->arch.timing_max_duration[i] = 0;
		vcpu->arch.timing_min_duration[i] = 0xFFFFFFFF;
		vcpu->arch.timing_sum_duration[i] = 0;
		vcpu->arch.timing_sum_quad_duration[i] = 0;
	}
	vcpu->arch.timing_last_exit = 0;
	vcpu->arch.timing_exit.tv64 = 0;
	vcpu->arch.timing_last_enter.tv64 = 0;

	mutex_unlock(&vcpu->arch.exit_timing_lock);
}

static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type)
{
	u64 old;

	mutex_lock(&vcpu->arch.exit_timing_lock);

	vcpu->arch.timing_count_type[type]++;

	/* sum */
	old = vcpu->arch.timing_sum_duration[type];
	vcpu->arch.timing_sum_duration[type] += duration;
	if (unlikely(old > vcpu->arch.timing_sum_duration[type])) {
		printk(KERN_ERR"%s - wrap adding sum of durations"
			" old %lld new %lld type %d exit # of type %d\n",
			__func__, old, vcpu->arch.timing_sum_duration[type],
			type, vcpu->arch.timing_count_type[type]);
	}

	/* square sum */
	old = vcpu->arch.timing_sum_quad_duration[type];
	vcpu->arch.timing_sum_quad_duration[type] += (duration*duration);
	if (unlikely(old > vcpu->arch.timing_sum_quad_duration[type])) {
		printk(KERN_ERR"%s - wrap adding sum of squared durations"
			" old %lld new %lld type %d exit # of type %d\n",
			__func__, old,
			vcpu->arch.timing_sum_quad_duration[type],
			type, vcpu->arch.timing_count_type[type]);
	}

	/* set min/max */
	if (unlikely(duration < vcpu->arch.timing_min_duration[type]))
		vcpu->arch.timing_min_duration[type] = duration;
	if (unlikely(duration > vcpu->arch.timing_max_duration[type]))
		vcpu->arch.timing_max_duration[type] = duration;

	mutex_unlock(&vcpu->arch.exit_timing_lock);
}

void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu)
{
	u64 exit = vcpu->arch.timing_last_exit;
	u64 enter = vcpu->arch.timing_last_enter.tv64;

	/* save exit time, used next exit when the reenter time is known */
	vcpu->arch.timing_last_exit = vcpu->arch.timing_exit.tv64;

	if (unlikely(vcpu->arch.last_exit_type == 0xDEAD || exit == 0))
		return; /* skip incomplete cycle (e.g. after reset) */

	/* update statistics for average and standard deviation */
	add_exit_timing(vcpu, (enter - exit), vcpu->arch.last_exit_type);
	/* enter -> timing_last_exit is time spent in guest - log this too */
	add_exit_timing(vcpu, (vcpu->arch.timing_last_exit - enter),
			TIMEINGUEST);
}

static const char *kvm_exit_names[__NUMBER_OF_KVM_EXIT_TYPES] = {
	[MMIO_EXITS] =              "MMIO",
	[DCR_EXITS] =               "DCR",
	[SIGNAL_EXITS] =            "SIGNAL",
	[ITLB_REAL_MISS_EXITS] =    "ITLBREAL",
	[ITLB_VIRT_MISS_EXITS] =    "ITLBVIRT",
	[DTLB_REAL_MISS_EXITS] =    "DTLBREAL",
	[DTLB_VIRT_MISS_EXITS] =    "DTLBVIRT",
	[SYSCALL_EXITS] =           "SYSCALL",
	[ISI_EXITS] =               "ISI",
	[DSI_EXITS] =               "DSI",
	[EMULATED_INST_EXITS] =     "EMULINST",
	[EMULATED_MTMSRWE_EXITS] =  "EMUL_WAIT",
	[EMULATED_WRTEE_EXITS] =    "EMUL_WRTEE",
	[EMULATED_MTSPR_EXITS] =    "EMUL_MTSPR",
	[EMULATED_MFSPR_EXITS] =    "EMUL_MFSPR",
	[EMULATED_MTMSR_EXITS] =    "EMUL_MTMSR",
	[EMULATED_MFMSR_EXITS] =    "EMUL_MFMSR",
	[EMULATED_TLBSX_EXITS] =    "EMUL_TLBSX",
	[EMULATED_TLBWE_EXITS] =    "EMUL_TLBWE",
	[EMULATED_RFI_EXITS] =      "EMUL_RFI",
	[DEC_EXITS] =               "DEC",
	[EXT_INTR_EXITS] =          "EXTINT",
	[HALT_WAKEUP] =             "HALT",
	[USR_PR_INST] =             "USR_PR_INST",
	[FP_UNAVAIL] =              "FP_UNAVAIL",
	[DEBUG_EXITS] =             "DEBUG",
	[TIMEINGUEST] =             "TIMEINGUEST"
};

static int kvmppc_exit_timing_show(struct seq_file *m, void *private)
{
	struct kvm_vcpu *vcpu = m->private;
	int i;
	u64 min, max, sum, sum_quad;

	seq_printf(m, "%s", "type count min max sum sum_squared\n");

	for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) {
		min = vcpu->arch.timing_min_duration[i];
		do_div(min, tb_ticks_per_usec);
		max = vcpu->arch.timing_max_duration[i];
		do_div(max, tb_ticks_per_usec);
		sum = vcpu->arch.timing_sum_duration[i];
		do_div(sum, tb_ticks_per_usec);
		sum_quad = vcpu->arch.timing_sum_quad_duration[i];
		do_div(sum_quad, tb_ticks_per_usec);

		seq_printf(m, "%12s %10d %10lld %10lld %20lld %20lld\n",
			kvm_exit_names[i],
			vcpu->arch.timing_count_type[i],
			min,
			max,
			sum,
			sum_quad);
	}
	return 0;
}

/* Write 'c' to clear the timing statistics. */
static ssize_t kvmppc_exit_timing_write(struct file *file,
				       const char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	int err = -EINVAL;
	char c;

	if (count > 1) {
		goto done;
	}

	if (get_user(c, user_buf)) {
		err = -EFAULT;
		goto done;
	}

	if (c == 'c') {
		struct seq_file *seqf = file->private_data;
		struct kvm_vcpu *vcpu = seqf->private;
		/* Write does not affect our buffers previously generated with
		 * show. seq_file is locked here to prevent races of init with
		 * a show call */
		mutex_lock(&seqf->lock);
		kvmppc_init_timing_stats(vcpu);
		mutex_unlock(&seqf->lock);
		err = count;
	}

done:
	return err;
}

static int kvmppc_exit_timing_open(struct inode *inode, struct file *file)
{
	return single_open(file, kvmppc_exit_timing_show, inode->i_private);
}

static const struct file_operations kvmppc_exit_timing_fops = {
	.owner   = THIS_MODULE,
	.open    = kvmppc_exit_timing_open,
	.read    = seq_read,
	.write   = kvmppc_exit_timing_write,
	.llseek  = seq_lseek,
	.release = single_release,
};

void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu, unsigned int id)
{
	static char dbg_fname[50];
	struct dentry *debugfs_file;

	snprintf(dbg_fname, sizeof(dbg_fname), "vm%u_vcpu%u_timing",
		 current->pid, id);
	debugfs_file = debugfs_create_file(dbg_fname, 0666,
					kvm_debugfs_dir, vcpu,
					&kvmppc_exit_timing_fops);

	if (!debugfs_file) {
		printk(KERN_ERR"%s: error creating debugfs file %s\n",
			__func__, dbg_fname);
		return;
	}

	vcpu->arch.debugfs_exit_timing = debugfs_file;
}

void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.debugfs_exit_timing) {
		debugfs_remove(vcpu->arch.debugfs_exit_timing);
		vcpu->arch.debugfs_exit_timing = NULL;
	}
}