tlb.c (b70366e5d31788650b2a5cec5cd13ea80ac7e44a) | tlb.c (372582a6c6fcced38219d06545dd26ad7904bc6f) |
---|---|
1/* 2 * This file is subject to the terms and conditions of the GNU General Public 3 * License. See the file "COPYING" in the main directory of this archive 4 * for more details. 5 * 6 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that 7 * TLB handlers run from KSEG0 8 * --- 19 unchanged lines hidden (view full) --- 28 29#undef CONFIG_MIPS_MT 30#include <asm/r4kcache.h> 31#define CONFIG_MIPS_MT 32 33#define KVM_GUEST_PC_TLB 0 34#define KVM_GUEST_SP_TLB 1 35 | 1/* 2 * This file is subject to the terms and conditions of the GNU General Public 3 * License. See the file "COPYING" in the main directory of this archive 4 * for more details. 5 * 6 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that 7 * TLB handlers run from KSEG0 8 * --- 19 unchanged lines hidden (view full) --- 28 29#undef CONFIG_MIPS_MT 30#include <asm/r4kcache.h> 31#define CONFIG_MIPS_MT 32 33#define KVM_GUEST_PC_TLB 0 34#define KVM_GUEST_SP_TLB 1 35 |
#ifdef CONFIG_KVM_MIPS_VZ
/**
 * kvm_mips_get_root_asid() - Root ASID to use for a VCPU's GPA mappings.
 * @vcpu:	KVM VCPU pointer.
 *
 * Returns:	0 when the CPU has GuestID support (the cpu_has_guestid
 *		branch — presumably GuestIDs distinguish guest mappings so no
 *		root ASID is needed; confirm against VZ spec), otherwise the
 *		current CPU's ASID for the GPA mm of the VCPU's VM.
 */
static u32 kvm_mips_get_root_asid(struct kvm_vcpu *vcpu)
{
	struct mm_struct *gpa_mm = &vcpu->kvm->arch.gpa_mm;

	return cpu_has_guestid ? 0 : cpu_asid(smp_processor_id(), gpa_mm);
}
#endif

|
36static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) 37{ 38 struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; 39 int cpu = smp_processor_id(); 40 41 return cpu_asid(cpu, kern_mm); 42} 43 --- 130 unchanged lines hidden (view full) --- 174 kvm_debug("%s: Invalidated guest kernel entryhi %#lx @ idx %d\n", 175 __func__, (va & VPN2_MASK) | 176 kvm_mips_get_kernel_asid(vcpu), idx_kernel); 177 178 return 0; 179} 180EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv); 181 | 48static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) 49{ 50 struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; 51 int cpu = smp_processor_id(); 52 53 return cpu_asid(cpu, kern_mm); 54} 55 --- 130 unchanged lines hidden (view full) --- 186 kvm_debug("%s: Invalidated guest kernel entryhi %#lx @ idx %d\n", 187 __func__, (va & VPN2_MASK) | 188 kvm_mips_get_kernel_asid(vcpu), idx_kernel); 189 190 return 0; 191} 192EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv); 193 |
194#ifdef CONFIG_KVM_MIPS_VZ 195 196/* GuestID management */ 197 |
|
182/** | 198/** |
199 * clear_root_gid() - Set GuestCtl1.RID for normal root operation. 200 */ 201static inline void clear_root_gid(void) 202{ 203 if (cpu_has_guestid) { 204 clear_c0_guestctl1(MIPS_GCTL1_RID); 205 mtc0_tlbw_hazard(); 206 } 207} 208 209/** 210 * set_root_gid_to_guest_gid() - Set GuestCtl1.RID to match GuestCtl1.ID. 211 * 212 * Sets the root GuestID to match the current guest GuestID, for TLB operation 213 * on the GPA->RPA mappings in the root TLB. 214 * 215 * The caller must be sure to disable HTW while the root GID is set, and 216 * possibly longer if TLB registers are modified. 217 */ 218static inline void set_root_gid_to_guest_gid(void) 219{ 220 unsigned int guestctl1; 221 222 if (cpu_has_guestid) { 223 back_to_back_c0_hazard(); 224 guestctl1 = read_c0_guestctl1(); 225 guestctl1 = (guestctl1 & ~MIPS_GCTL1_RID) | 226 ((guestctl1 & MIPS_GCTL1_ID) >> MIPS_GCTL1_ID_SHIFT) 227 << MIPS_GCTL1_RID_SHIFT; 228 write_c0_guestctl1(guestctl1); 229 mtc0_tlbw_hazard(); 230 } 231} 232 233int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va) 234{ 235 int idx; 236 unsigned long flags, old_entryhi; 237 238 local_irq_save(flags); 239 htw_stop(); 240 241 /* Set root GuestID for root probe and write of guest TLB entry */ 242 set_root_gid_to_guest_gid(); 243 244 old_entryhi = read_c0_entryhi(); 245 246 idx = _kvm_mips_host_tlb_inv((va & VPN2_MASK) | 247 kvm_mips_get_root_asid(vcpu)); 248 249 write_c0_entryhi(old_entryhi); 250 clear_root_gid(); 251 mtc0_tlbw_hazard(); 252 253 htw_start(); 254 local_irq_restore(flags); 255 256 if (idx > 0) 257 kvm_debug("%s: Invalidated root entryhi %#lx @ idx %d\n", 258 __func__, (va & VPN2_MASK) | 259 kvm_mips_get_root_asid(vcpu), idx); 260 261 return 0; 262} 263EXPORT_SYMBOL_GPL(kvm_vz_host_tlb_inv); 264 265/** 266 * kvm_vz_guest_tlb_lookup() - Lookup a guest VZ TLB mapping. 267 * @vcpu: KVM VCPU pointer. 268 * @gpa: Guest virtual address in a TLB mapped guest segment. 269 * @gpa: Ponter to output guest physical address it maps to. 
270 * 271 * Converts a guest virtual address in a guest TLB mapped segment to a guest 272 * physical address, by probing the guest TLB. 273 * 274 * Returns: 0 if guest TLB mapping exists for @gva. *@gpa will have been 275 * written. 276 * -EFAULT if no guest TLB mapping exists for @gva. *@gpa may not 277 * have been written. 278 */ 279int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva, 280 unsigned long *gpa) 281{ 282 unsigned long o_entryhi, o_entrylo[2], o_pagemask; 283 unsigned int o_index; 284 unsigned long entrylo[2], pagemask, pagemaskbit, pa; 285 unsigned long flags; 286 int index; 287 288 /* Probe the guest TLB for a mapping */ 289 local_irq_save(flags); 290 /* Set root GuestID for root probe of guest TLB entry */ 291 htw_stop(); 292 set_root_gid_to_guest_gid(); 293 294 o_entryhi = read_gc0_entryhi(); 295 o_index = read_gc0_index(); 296 297 write_gc0_entryhi((o_entryhi & 0x3ff) | (gva & ~0xfffl)); 298 mtc0_tlbw_hazard(); 299 guest_tlb_probe(); 300 tlb_probe_hazard(); 301 302 index = read_gc0_index(); 303 if (index < 0) { 304 /* No match, fail */ 305 write_gc0_entryhi(o_entryhi); 306 write_gc0_index(o_index); 307 308 clear_root_gid(); 309 htw_start(); 310 local_irq_restore(flags); 311 return -EFAULT; 312 } 313 314 /* Match! 
read the TLB entry */ 315 o_entrylo[0] = read_gc0_entrylo0(); 316 o_entrylo[1] = read_gc0_entrylo1(); 317 o_pagemask = read_gc0_pagemask(); 318 319 mtc0_tlbr_hazard(); 320 guest_tlb_read(); 321 tlb_read_hazard(); 322 323 entrylo[0] = read_gc0_entrylo0(); 324 entrylo[1] = read_gc0_entrylo1(); 325 pagemask = ~read_gc0_pagemask() & ~0x1fffl; 326 327 write_gc0_entryhi(o_entryhi); 328 write_gc0_index(o_index); 329 write_gc0_entrylo0(o_entrylo[0]); 330 write_gc0_entrylo1(o_entrylo[1]); 331 write_gc0_pagemask(o_pagemask); 332 333 clear_root_gid(); 334 htw_start(); 335 local_irq_restore(flags); 336 337 /* Select one of the EntryLo values and interpret the GPA */ 338 pagemaskbit = (pagemask ^ (pagemask & (pagemask - 1))) >> 1; 339 pa = entrylo[!!(gva & pagemaskbit)]; 340 341 /* 342 * TLB entry may have become invalid since TLB probe if physical FTLB 343 * entries are shared between threads (e.g. I6400). 344 */ 345 if (!(pa & ENTRYLO_V)) 346 return -EFAULT; 347 348 /* 349 * Note, this doesn't take guest MIPS32 XPA into account, where PFN is 350 * split with XI/RI in the middle. 351 */ 352 pa = (pa << 6) & ~0xfffl; 353 pa |= gva & ~(pagemask | pagemaskbit); 354 355 *gpa = pa; 356 return 0; 357} 358EXPORT_SYMBOL_GPL(kvm_vz_guest_tlb_lookup); 359 360/** 361 * kvm_vz_local_flush_roottlb_all_guests() - Flush all root TLB entries for 362 * guests. 363 * 364 * Invalidate all entries in root tlb which are GPA mappings. 365 */ 366void kvm_vz_local_flush_roottlb_all_guests(void) 367{ 368 unsigned long flags; 369 unsigned long old_entryhi, old_pagemask, old_guestctl1; 370 int entry; 371 372 if (WARN_ON(!cpu_has_guestid)) 373 return; 374 375 local_irq_save(flags); 376 htw_stop(); 377 378 /* TLBR may clobber EntryHi.ASID, PageMask, and GuestCtl1.RID */ 379 old_entryhi = read_c0_entryhi(); 380 old_pagemask = read_c0_pagemask(); 381 old_guestctl1 = read_c0_guestctl1(); 382 383 /* 384 * Invalidate guest entries in root TLB while leaving root entries 385 * intact when possible. 
386 */ 387 for (entry = 0; entry < current_cpu_data.tlbsize; entry++) { 388 write_c0_index(entry); 389 mtc0_tlbw_hazard(); 390 tlb_read(); 391 tlb_read_hazard(); 392 393 /* Don't invalidate non-guest (RVA) mappings in the root TLB */ 394 if (!(read_c0_guestctl1() & MIPS_GCTL1_RID)) 395 continue; 396 397 /* Make sure all entries differ. */ 398 write_c0_entryhi(UNIQUE_ENTRYHI(entry)); 399 write_c0_entrylo0(0); 400 write_c0_entrylo1(0); 401 write_c0_guestctl1(0); 402 mtc0_tlbw_hazard(); 403 tlb_write_indexed(); 404 } 405 406 write_c0_entryhi(old_entryhi); 407 write_c0_pagemask(old_pagemask); 408 write_c0_guestctl1(old_guestctl1); 409 tlbw_use_hazard(); 410 411 htw_start(); 412 local_irq_restore(flags); 413} 414EXPORT_SYMBOL_GPL(kvm_vz_local_flush_roottlb_all_guests); 415 416/** 417 * kvm_vz_local_flush_guesttlb_all() - Flush all guest TLB entries. 418 * 419 * Invalidate all entries in guest tlb irrespective of guestid. 420 */ 421void kvm_vz_local_flush_guesttlb_all(void) 422{ 423 unsigned long flags; 424 unsigned long old_index; 425 unsigned long old_entryhi; 426 unsigned long old_entrylo[2]; 427 unsigned long old_pagemask; 428 int entry; 429 430 local_irq_save(flags); 431 432 /* Preserve all clobbered guest registers */ 433 old_index = read_gc0_index(); 434 old_entryhi = read_gc0_entryhi(); 435 old_entrylo[0] = read_gc0_entrylo0(); 436 old_entrylo[1] = read_gc0_entrylo1(); 437 old_pagemask = read_gc0_pagemask(); 438 439 /* Invalidate guest entries in guest TLB */ 440 write_gc0_entrylo0(0); 441 write_gc0_entrylo1(0); 442 write_gc0_pagemask(0); 443 for (entry = 0; entry < current_cpu_data.guest.tlbsize; entry++) { 444 /* Make sure all entries differ. 
*/ 445 write_gc0_index(entry); 446 write_gc0_entryhi(UNIQUE_GUEST_ENTRYHI(entry)); 447 mtc0_tlbw_hazard(); 448 guest_tlb_write_indexed(); 449 } 450 write_gc0_index(old_index); 451 write_gc0_entryhi(old_entryhi); 452 write_gc0_entrylo0(old_entrylo[0]); 453 write_gc0_entrylo1(old_entrylo[1]); 454 write_gc0_pagemask(old_pagemask); 455 tlbw_use_hazard(); 456 457 local_irq_restore(flags); 458} 459EXPORT_SYMBOL_GPL(kvm_vz_local_flush_guesttlb_all); 460 461/** 462 * kvm_vz_save_guesttlb() - Save a range of guest TLB entries. 463 * @buf: Buffer to write TLB entries into. 464 * @index: Start index. 465 * @count: Number of entries to save. 466 * 467 * Save a range of guest TLB entries. The caller must ensure interrupts are 468 * disabled. 469 */ 470void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index, 471 unsigned int count) 472{ 473 unsigned int end = index + count; 474 unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask; 475 unsigned int guestctl1 = 0; 476 int old_index, i; 477 478 /* Save registers we're about to clobber */ 479 old_index = read_gc0_index(); 480 old_entryhi = read_gc0_entryhi(); 481 old_entrylo0 = read_gc0_entrylo0(); 482 old_entrylo1 = read_gc0_entrylo1(); 483 old_pagemask = read_gc0_pagemask(); 484 485 /* Set root GuestID for root probe */ 486 htw_stop(); 487 set_root_gid_to_guest_gid(); 488 if (cpu_has_guestid) 489 guestctl1 = read_c0_guestctl1(); 490 491 /* Read each entry from guest TLB */ 492 for (i = index; i < end; ++i, ++buf) { 493 write_gc0_index(i); 494 495 mtc0_tlbr_hazard(); 496 guest_tlb_read(); 497 tlb_read_hazard(); 498 499 if (cpu_has_guestid && 500 (read_c0_guestctl1() ^ guestctl1) & MIPS_GCTL1_RID) { 501 /* Entry invalid or belongs to another guest */ 502 buf->tlb_hi = UNIQUE_GUEST_ENTRYHI(i); 503 buf->tlb_lo[0] = 0; 504 buf->tlb_lo[1] = 0; 505 buf->tlb_mask = 0; 506 } else { 507 /* Entry belongs to the right guest */ 508 buf->tlb_hi = read_gc0_entryhi(); 509 buf->tlb_lo[0] = read_gc0_entrylo0(); 510 
buf->tlb_lo[1] = read_gc0_entrylo1(); 511 buf->tlb_mask = read_gc0_pagemask(); 512 } 513 } 514 515 /* Clear root GuestID again */ 516 clear_root_gid(); 517 htw_start(); 518 519 /* Restore clobbered registers */ 520 write_gc0_index(old_index); 521 write_gc0_entryhi(old_entryhi); 522 write_gc0_entrylo0(old_entrylo0); 523 write_gc0_entrylo1(old_entrylo1); 524 write_gc0_pagemask(old_pagemask); 525 526 tlbw_use_hazard(); 527} 528EXPORT_SYMBOL_GPL(kvm_vz_save_guesttlb); 529 530/** 531 * kvm_vz_load_guesttlb() - Save a range of guest TLB entries. 532 * @buf: Buffer to read TLB entries from. 533 * @index: Start index. 534 * @count: Number of entries to load. 535 * 536 * Load a range of guest TLB entries. The caller must ensure interrupts are 537 * disabled. 538 */ 539void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index, 540 unsigned int count) 541{ 542 unsigned int end = index + count; 543 unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask; 544 int old_index, i; 545 546 /* Save registers we're about to clobber */ 547 old_index = read_gc0_index(); 548 old_entryhi = read_gc0_entryhi(); 549 old_entrylo0 = read_gc0_entrylo0(); 550 old_entrylo1 = read_gc0_entrylo1(); 551 old_pagemask = read_gc0_pagemask(); 552 553 /* Set root GuestID for root probe */ 554 htw_stop(); 555 set_root_gid_to_guest_gid(); 556 557 /* Write each entry to guest TLB */ 558 for (i = index; i < end; ++i, ++buf) { 559 write_gc0_index(i); 560 write_gc0_entryhi(buf->tlb_hi); 561 write_gc0_entrylo0(buf->tlb_lo[0]); 562 write_gc0_entrylo1(buf->tlb_lo[1]); 563 write_gc0_pagemask(buf->tlb_mask); 564 565 mtc0_tlbw_hazard(); 566 guest_tlb_write_indexed(); 567 } 568 569 /* Clear root GuestID again */ 570 clear_root_gid(); 571 htw_start(); 572 573 /* Restore clobbered registers */ 574 write_gc0_index(old_index); 575 write_gc0_entryhi(old_entryhi); 576 write_gc0_entrylo0(old_entrylo0); 577 write_gc0_entrylo1(old_entrylo1); 578 write_gc0_pagemask(old_pagemask); 579 580 
tlbw_use_hazard(); 581} 582EXPORT_SYMBOL_GPL(kvm_vz_load_guesttlb); 583 584#endif 585 586/** |
|
183 * kvm_mips_suspend_mm() - Suspend the active mm. 184 * @cpu The CPU we're running on. 185 * 186 * Suspend the active_mm, ready for a switch to a KVM guest virtual address 187 * space. This is left active for the duration of guest context, including time 188 * with interrupts enabled, so we need to be careful not to confuse e.g. cache 189 * management IPIs. 190 * --- 25 unchanged lines hidden --- | 587 * kvm_mips_suspend_mm() - Suspend the active mm. 588 * @cpu The CPU we're running on. 589 * 590 * Suspend the active_mm, ready for a switch to a KVM guest virtual address 591 * space. This is left active for the duration of guest context, including time 592 * with interrupts enabled, so we need to be careful not to confuse e.g. cache 593 * management IPIs. 594 * --- 25 unchanged lines hidden --- |