/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/tlbdebug.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#define KVM_GUEST_PC_TLB	0
#define KVM_GUEST_SP_TLB	1

atomic_t kvm_mips_instance;
EXPORT_SYMBOL_GPL(kvm_mips_instance);

static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_kernel_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}

static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_user_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}

inline u32 kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.commpage_tlb;
}

/* Structure defining a TLB entry data set. */

void kvm_mips_dump_host_tlbs(void)
{
	unsigned long flags;

	local_irq_save(flags);

	kvm_info("HOST TLBs:\n");
	dump_tlb_regs();
	pr_info("\n");
	dump_tlb_all();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);

void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb tlb;
	int i;

	kvm_info("Guest TLBs:\n");
	kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		tlb = vcpu->arch.guest_tlb[i];
		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo[0] | tlb.tlb_lo[1]) & ENTRYLO_V
							? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09llx %c%c attr %lx ",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[0]),
			 (tlb.tlb_lo[0] & ENTRYLO_D) ? 'D' : ' ',
			 (tlb.tlb_lo[0] & ENTRYLO_G) ? 'G' : ' ',
			 (tlb.tlb_lo[0] & ENTRYLO_C) >> ENTRYLO_C_SHIFT);
		kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[1]),
			 (tlb.tlb_lo[1] & ENTRYLO_D) ? 'D' : ' ',
			 (tlb.tlb_lo[1] & ENTRYLO_G) ? 'G' : ' ',
			 (tlb.tlb_lo[1] & ENTRYLO_C) >> ENTRYLO_C_SHIFT,
			 tlb.tlb_mask);
	}
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);

/* XXXKYMA: Must be called with interrupts disabled */
/* set flush_dcache_mask == 0 if no dcache flush required */
int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
			    unsigned long entrylo0, unsigned long entrylo1,
			    int flush_dcache_mask)
{
	unsigned long flags;
	unsigned long old_entryhi;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	write_c0_entryhi(entryhi);
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx > current_cpu_data.tlbsize) {
		kvm_err("%s: Invalid Index: %d\n", __func__, idx);
		kvm_mips_dump_host_tlbs();
		local_irq_restore(flags);
		return -1;
	}

	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();

	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, idx, read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Flush D-cache */
	if (flush_dcache_mask) {
		if (entrylo0 & ENTRYLO_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page((entryhi & VPN2_MASK) &
					      ~flush_dcache_mask);
		}
		if (entrylo1 & ENTRYLO_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page(((entryhi & VPN2_MASK) &
					       ~flush_dcache_mask) |
					      (0x1 << PAGE_SHIFT));
		}
	}

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	local_irq_restore(flags);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_write);

int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
	struct kvm_vcpu *vcpu)
{
	kvm_pfn_t pfn0, pfn1;
	unsigned long flags, old_entryhi = 0, vaddr = 0;
	unsigned long entrylo0 = 0, entrylo1 = 0;

	pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
	pfn1 = 0;
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
		   (0x3 << ENTRYLO_C_SHIFT) | ENTRYLO_D | ENTRYLO_V;
	entrylo1 = 0;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	vaddr = badvaddr & (PAGE_MASK << 1);
	write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	write_c0_index(kvm_mips_get_commpage_asid(vcpu));
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_commpage_tlb_fault);

int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
	int i;
	int index = -1;
	struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
		    TLB_HI_ASID_HIT(tlb[i], entryhi)) {
			index = i;
			break;
		}
	}

	/* i == KVM_MIPS_GUEST_TLB_SIZE on a miss, so don't dereference tlb[i] */
	kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
		  __func__, entryhi, index,
		  index >= 0 ? tlb[index].tlb_lo[0] : 0L,
		  index >= 0 ? tlb[index].tlb_lo[1] : 0L);

	return index;
}
EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup);

int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{
	unsigned long old_entryhi, flags;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	if (KVM_GUEST_KERNEL_MODE(vcpu))
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_kernel_asid(vcpu));
	else
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_user_asid(vcpu));

	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);

	kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);

	return idx;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_lookup);

int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
	int idx;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx >= current_cpu_data.tlbsize)
		BUG();

	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		tlbw_use_hazard();
	}

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);

	if (idx >= 0)
		kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
			  (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu), idx);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);

void kvm_mips_flush_host_tlb(int skip_kseg0)
{
	unsigned long flags;
	unsigned long old_entryhi, entryhi;
	unsigned long old_pagemask;
	int entry = 0;
	int maxentry = current_cpu_data.tlbsize;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	/* Blast 'em all away. */
	for (entry = 0; entry < maxentry; entry++) {
		write_c0_index(entry);

		if (skip_kseg0) {
			mtc0_tlbr_hazard();
			tlb_read();
			tlb_read_hazard();

			entryhi = read_c0_entryhi();

			/* Don't blow away guest kernel entries */
			if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
				continue;
		}

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		tlbw_use_hazard();
	}

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_flush_host_tlb);

void kvm_local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry = 0;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
		entry++;
	}
	write_c0_entryhi(old_ctx);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_local_flush_tlb_all);