/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__

#include <linux/string.h>
#include <asm/bitops.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/cpu_has_feature.h>
#include <asm/ppc-opcode.h>

#ifdef CONFIG_PPC_PSERIES
static inline bool kvmhv_on_pseries(void)
{
	return !cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool kvmhv_on_pseries(void)
{
	return false;
}
#endif

/*
 * Structure for a nested guest, that is, for a guest that is managed by
 * one of our guests.
 */
struct kvm_nested_guest {
	struct kvm *l1_host;		/* L1 VM that owns this nested guest */
	int l1_lpid;			/* lpid L1 guest thinks this guest is */
	int shadow_lpid;		/* real lpid of this nested guest */
	pgd_t *shadow_pgtable;		/* our page table for this guest */
	u64 l1_gr_to_hr;		/* L1's addr of part'n-scoped table */
	u64 process_table;		/* process table entry for this guest */
	long refcnt;			/* number of pointers to this struct */
	struct mutex tlb_lock;		/* serialize page faults and tlbies */
	struct kvm_nested_guest *next;
	cpumask_t need_tlb_flush;
	cpumask_t cpu_in_guest;
	short prev_cpu[NR_CPUS];
	u8 radix;			/* is this nested guest radix */
};

/*
 * We define a nested rmap entry as a single 64-bit quantity
 * 0xFFF0000000000000	12-bit lpid field
 * 0x000FFFFFFFFFF000	40-bit guest 4k page frame number
 * 0x0000000000000001	1-bit  single entry flag
 */
#define RMAP_NESTED_LPID_MASK		0xFFF0000000000000UL
#define RMAP_NESTED_LPID_SHIFT		(52)
#define RMAP_NESTED_GPA_MASK		0x000FFFFFFFFFF000UL
#define RMAP_NESTED_IS_SINGLE_ENTRY	0x0000000000000001UL

/* Structure for a nested guest rmap entry */
struct rmap_nested {
	struct llist_node list;
	u64 rmap;
};
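/*
 * For illustration only (not part of this header): decoding a nested
 * rmap entry with the masks above, assuming "entry" holds the u64 rmap
 * value taken from a memslot rmap entry or an rmap_nested node:
 *
 *	unsigned int lpid = (entry & RMAP_NESTED_LPID_MASK) >>
 *			    RMAP_NESTED_LPID_SHIFT;
 *	unsigned long gpa = entry & RMAP_NESTED_GPA_MASK;
 *	bool single = entry & RMAP_NESTED_IS_SINGLE_ENTRY;
 */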
/*
 * for_each_nest_rmap_safe - iterate over the list of nested rmap entries
 *			     safe against removal of the list entry or NULL list
 * @pos:	a (struct rmap_nested *) to use as a loop cursor
 * @node:	pointer to the first entry
 *		NOTE: this can be NULL
 * @rmapp:	an (unsigned long *) in which to return the rmap entries on each
 *		iteration
 *		NOTE: this must point to already allocated memory
 *
 * The nested_rmap is a llist of (struct rmap_nested) entries pointed to by the
 * rmap entry in the memslot. The list is always terminated by a "single entry"
 * stored in the list element of the final entry of the llist. If there is ONLY
 * a single entry then this is itself in the rmap entry of the memslot, not a
 * llist head pointer.
 *
 * Note that the iterator below assumes that a nested rmap entry is always
 * non-zero. This is true for our usage because the LPID field is always
 * non-zero (zero is reserved for the host).
 *
 * This should be used to iterate over the list of rmap_nested entries with
 * processing done on the u64 rmap value given by each iteration. This is safe
 * against removal of list entries and it is always safe to call free on (pos).
 *
 * e.g.
 * struct rmap_nested *cursor;
 * struct llist_node *first;
 * unsigned long rmap;
 * for_each_nest_rmap_safe(cursor, first, &rmap) {
 *	do_something(rmap);
 *	free(cursor);
 * }
 */
#define for_each_nest_rmap_safe(pos, node, rmapp)			       \
	for ((pos) = llist_entry((node), typeof(*(pos)), list);	       \
	     (node) &&							       \
	     (*(rmapp) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ?    \
			  ((u64) (node)) : ((pos)->rmap))) &&		       \
	     (((node) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ?     \
			 ((struct llist_node *) ((pos) = NULL)) :	       \
			 (pos)->list.next)), true);			       \
	     (pos) = llist_entry((node), typeof(*(pos)), list))

struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
					  bool create);
void kvmhv_put_nested(struct kvm_nested_guest *gp);
int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid);

/* Encoding of first parameter for H_TLB_INVALIDATE */
#define H_TLBIE_P1_ENC(ric, prs, r)	(___PPC_RIC(ric) | ___PPC_PRS(prs) | \
					 ___PPC_R(r))

/* Power architecture requires HPT is at least 256kiB, at most 64TiB */
#define PPC_MIN_HPT_ORDER	18
#define PPC_MAX_HPT_ORDER	46

#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	return &get_paca()->shadow_vcpu;
}

static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	preempt_enable();
}
#endif

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE

static inline bool kvm_is_radix(struct kvm *kvm)
{
	return kvm->arch.radix;
}

static inline bool kvmhv_vcpu_is_radix(struct kvm_vcpu *vcpu)
{
	bool radix;

	if (vcpu->arch.nested)
		radix = vcpu->arch.nested->radix;
	else
		radix = kvm_is_radix(vcpu->kvm);

	return radix;
}

#define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
#endif

/*
 * We use a lock bit in HPTE dword 0 to synchronize updates and
 * accesses to each HPTE, and another bit to indicate non-present
 * HPTEs.
 */
#define HPTE_V_HVLOCK	0x40UL
#define HPTE_V_ABSENT	0x20UL

/*
 * We use this bit in the guest_rpte field of the revmap entry
 * to indicate a modified HPTE.
 */
#define HPTE_GR_MODIFIED	(1ul << 62)

/* These bits are reserved in the guest view of the HPTE */
#define HPTE_GR_RESERVED	HPTE_GR_MODIFIED

static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
{
	unsigned long tmp, old;
	__be64 be_lockbit, be_bits;

	/*
	 * We load/store in native endian, but the HTAB is in big endian. If
	 * we byte swap all data we apply on the PTE we're implicitly correct
	 * again.
	 */
	be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
	be_bits = cpu_to_be64(bits);

	asm volatile("	ldarx	%0,0,%2\n"
		     "	and.	%1,%0,%3\n"
		     "	bne	2f\n"
		     "	or	%0,%0,%4\n"
		     "	stdcx.	%0,0,%2\n"
		     "	beq+	2f\n"
		     "	mr	%1,%3\n"
		     "2:	isync"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (hpte), "r" (be_bits), "r" (be_lockbit)
		     : "cc", "memory");
	return old == 0;
}

static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
	hpte[0] = cpu_to_be64(hpte_v);
}

/* Without barrier */
static inline void __unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	hpte[0] = cpu_to_be64(hpte_v);
}
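/*
 * For illustration only (not part of this header): the usual pairing of
 * the locking helpers above, assuming "hptep" points at dword 0 of an
 * HPTE in the hashed page table:
 *
 *	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
 *		cpu_relax();
 *	v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
 *	... examine or update the HPTE ...
 *	__unlock_hpte(hptep, v);
 *
 * unlock_hpte() is used instead when the unlocking store must be
 * ordered after preceding updates.
 */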
/*
 * These functions encode knowledge of the POWER7/8/9 hardware
 * interpretations of the HPTE LP (large page size) field.
 */
static inline int kvmppc_hpte_page_shifts(unsigned long h, unsigned long l)
{
	unsigned int lphi;

	if (!(h & HPTE_V_LARGE))
		return 12;		/* 4kB */
	lphi = (l >> 16) & 0xf;
	switch ((l >> 12) & 0xf) {
	case 0:
		return !lphi ? 24 : 0;		/* 16MB */
		break;
	case 1:
		return 16;			/* 64kB */
		break;
	case 3:
		return !lphi ? 34 : 0;		/* 16GB */
		break;
	case 7:
		return (16 << 8) + 12;		/* 64kB in 4kB */
		break;
	case 8:
		if (!lphi)
			return (24 << 8) + 16;	/* 16MB in 64kB */
		if (lphi == 3)
			return (24 << 8) + 12;	/* 16MB in 4kB */
		break;
	}
	return 0;
}

static inline int kvmppc_hpte_base_page_shift(unsigned long h, unsigned long l)
{
	return kvmppc_hpte_page_shifts(h, l) & 0xff;
}

static inline int kvmppc_hpte_actual_page_shift(unsigned long h, unsigned long l)
{
	int tmp = kvmppc_hpte_page_shifts(h, l);

	if (tmp >= 0x100)
		tmp >>= 8;
	return tmp;
}

static inline unsigned long kvmppc_actual_pgsz(unsigned long v, unsigned long r)
{
	int shift = kvmppc_hpte_actual_page_shift(v, r);

	if (shift)
		return 1ul << shift;
	return 0;
}

static inline int kvmppc_pgsize_lp_encoding(int base_shift, int actual_shift)
{
	switch (base_shift) {
	case 12:
		switch (actual_shift) {
		case 12:
			return 0;
		case 16:
			return 7;
		case 24:
			return 0x38;
		}
		break;
	case 16:
		switch (actual_shift) {
		case 16:
			return 1;
		case 24:
			return 8;
		}
		break;
	case 24:
		return 0;
	}
	return -1;
}
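/*
 * For illustration only: for an HPTE describing a 16MB page in a
 * segment with 64kB base page size, kvmppc_hpte_page_shifts() returns
 * (24 << 8) + 16 = 0x1810, so kvmppc_hpte_base_page_shift() gives 16,
 * kvmppc_hpte_actual_page_shift() gives 24, and kvmppc_actual_pgsz()
 * gives 1ul << 24 = 16MB.  Going the other way,
 * kvmppc_pgsize_lp_encoding(16, 24) returns the LP encoding 8.
 */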
static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
					     unsigned long pte_index)
{
	int a_pgshift, b_pgshift;
	unsigned long rb = 0, va_low, sllp;

	b_pgshift = a_pgshift = kvmppc_hpte_page_shifts(v, r);
	if (a_pgshift >= 0x100) {
		b_pgshift &= 0xff;
		a_pgshift >>= 8;
	}

	/*
	 * Ignore the top 14 bits of va.
	 * v has its top two bits covering the segment size, hence shift
	 * by 16 bits; also clear the lower HPTE_V_AVPN_SHIFT (7) bits.
	 * The AVA field in v also has its lower 23 bits ignored.
	 * For base page size 4K we need bits 14..65 (so we must collect
	 * an extra 11 bits); for others we need bits 14..14+i.
	 */
	/* This covers bits 14..54 of va */
	rb = (v & ~0x7fUL) << 16;		/* AVA field */

	/*
	 * The AVA in v has its lower 23 bits cleared. We need to derive
	 * those from the pteg index.
	 */
	va_low = pte_index >> 3;
	if (v & HPTE_V_SECONDARY)
		va_low = ~va_low;
	/*
	 * Get the vpn bits from va_low using the reverse of hashing.
	 * In v we have the va with 23 bits dropped and then left shifted
	 * HPTE_V_AVPN_SHIFT (7) bits.  To find the vsid we therefore
	 * right shift it by (SID_SHIFT - (23 - 7)).
	 */
	if (!(v & HPTE_V_1TB_SEG))
		va_low ^= v >> (SID_SHIFT - 16);
	else
		va_low ^= v >> (SID_SHIFT_1T - 16);
	va_low &= 0x7ff;

	if (b_pgshift <= 12) {
		if (a_pgshift > 12) {
			sllp = (a_pgshift == 16) ? 5 : 4;
			rb |= sllp << 5;	/* AP field */
		}
		rb |= (va_low & 0x7ff) << 12;	/* remaining 11 bits of AVA */
	} else {
		int aval_shift;
		/*
		 * Remaining bits of the AVA/LP fields;
		 * these also contain the rr bits of LP.
		 */
		rb |= (va_low << b_pgshift) & 0x7ff000;
		/*
		 * Now clear the LP bits that are not needed, based on the
		 * actual page size.
		 */
		rb &= ~((1ul << a_pgshift) - 1);
		/*
		 * AVAL field: 58..77 - base_page_shift bits of va;
		 * we have space for bits 58..64, and missing bits should
		 * be zero filled.  The +1 accounts for the L bit shift.
		 */
		aval_shift = 64 - (77 - b_pgshift) + 1;
		rb |= ((va_low << aval_shift) & 0xfe);

		rb |= 1;		/* L field */
		rb |= r & 0xff000 & ((1ul << a_pgshift) - 1); /* LP field */
	}
	rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8;	/* B field */
	return rb;
}
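/*
 * For illustration only (not part of this header): the value built by
 * compute_tlbie_rb() is what ends up in RB for a tlbie targeting this
 * HPTE, along the lines of
 *
 *	rb = compute_tlbie_rb(v, r, pte_index);
 *	asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 0)
 *		     : : "r" (rb), "r" (lpid) : "memory");
 *
 * where lpid identifies the guest; the real invalidation paths also
 * take care of the required ordering (ptesync etc.) around the tlbie.
 */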
static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
	return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
}

static inline int hpte_is_writable(unsigned long ptel)
{
	unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);

	return pp != PP_RXRX && pp != PP_RXXX;
}

static inline unsigned long hpte_make_readonly(unsigned long ptel)
{
	if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
		ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
	else
		ptel |= PP_RXRX;
	return ptel;
}

static inline bool hpte_cache_flags_ok(unsigned long hptel, bool is_ci)
{
	unsigned int wimg = hptel & HPTE_R_WIMG;

	/* Handle SAO */
	if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
	    cpu_has_feature(CPU_FTR_ARCH_206))
		wimg = HPTE_R_M;

	if (!is_ci)
		return wimg == HPTE_R_M;
	/*
	 * If the host mapping is cache inhibited, make sure hptel is also
	 * cache inhibited.
	 */
	if (wimg & HPTE_R_W) /* FIXME!! is this ok for all guests? */
		return false;
	return !!(wimg & HPTE_R_I);
}

/*
 * If it's present and writable, atomically set dirty and referenced bits and
 * return the PTE, otherwise return 0.
 */
static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing)
{
	pte_t old_pte, new_pte = __pte(0);

	while (1) {
		/*
		 * Make sure we don't reload from ptep
		 */
		old_pte = READ_ONCE(*ptep);
		/*
		 * wait until H_PAGE_BUSY is clear then set it atomically
		 */
		if (unlikely(pte_val(old_pte) & H_PAGE_BUSY)) {
			cpu_relax();
			continue;
		}
		/* If pte is not present return None */
		if (unlikely(!(pte_val(old_pte) & _PAGE_PRESENT)))
			return __pte(0);

		new_pte = pte_mkyoung(old_pte);
		if (writing && pte_write(old_pte))
			new_pte = pte_mkdirty(new_pte);

		if (pte_xchg(ptep, old_pte, new_pte))
			break;
	}
	return new_pte;
}

static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return PP_RWRX <= pp && pp <= PP_RXRX;
	return true;
}

static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return pp == PP_RWRW;
	return pp <= PP_RWRW;
}

static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
{
	unsigned long skey;

	skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
		((hpte_r & HPTE_R_KEY_LO) >> 9);
	return (amr >> (62 - 2 * skey)) & 3;
}

static inline void lock_rmap(unsigned long *rmap)
{
	do {
		while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
			cpu_relax();
	} while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
}

static inline void unlock_rmap(unsigned long *rmap)
{
	__clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
}
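/*
 * For illustration only (not part of this header): the rmap lock bit
 * protects one entry of a memslot's rmap array while its chain is
 * walked or modified, e.g. (assuming gfn lies within the memslot):
 *
 *	rmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
 *	lock_rmap(rmap);
 *	... read or update the reverse-mapping chain ...
 *	unlock_rmap(rmap);
 */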
static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
				   unsigned long pagesize)
{
	unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;

	if (pagesize <= PAGE_SIZE)
		return true;
	return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}

/*
 * This works for 4k, 64k and 16M pages on POWER7,
 * and 4k and 16M pages on PPC970.
 */
static inline unsigned long slb_pgsize_encoding(unsigned long psize)
{
	unsigned long senc = 0;

	if (psize > 0x1000) {
		senc = SLB_VSID_L;
		if (psize == 0x10000)
			senc |= SLB_VSID_LP_01;
	}
	return senc;
}

static inline int is_vrma_hpte(unsigned long hpte_v)
{
	return (hpte_v & ~0xffffffUL) ==
		(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * Note modification of an HPTE; set the HPTE modified bit
 * if anyone is interested.
 */
static inline void note_hpte_modification(struct kvm *kvm,
					  struct revmap_entry *rev)
{
	if (atomic_read(&kvm->arch.hpte_mod_interest))
		rev->guest_rpte |= HPTE_GR_MODIFIED;
}

/*
 * Like kvm_memslots(), but for use in real mode when we can't do
 * any RCU stuff (since the secondary threads are offline from the
 * kernel's point of view), and we can't print anything.
 * Thus we use rcu_dereference_raw() rather than rcu_dereference_check().
 */
static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
{
	return rcu_dereference_raw_notrace(kvm->memslots[0]);
}

extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);
extern void kvmhv_radix_debugfs_init(struct kvm *kvm);

extern void kvmhv_rm_send_ipi(int cpu);

static inline unsigned long kvmppc_hpt_npte(struct kvm_hpt_info *hpt)
{
	/* HPTEs are 2**4 bytes long */
	return 1UL << (hpt->order - 4);
}

static inline unsigned long kvmppc_hpt_mask(struct kvm_hpt_info *hpt)
{
	/* 128 (2**7) bytes in each HPTEG */
	return (1UL << (hpt->order - 7)) - 1;
}

/* Set bits in a dirty bitmap, which is in LE format */
static inline void set_dirty_bits(unsigned long *map, unsigned long i,
				  unsigned long npages)
{
	if (npages >= 8)
		memset((char *)map + i / 8, 0xff, npages / 8);
	else
		for (; npages; ++i, --npages)
			__set_bit_le(i, map);
}

static inline void set_dirty_bits_atomic(unsigned long *map, unsigned long i,
					 unsigned long npages)
{
	if (npages >= 8)
		memset((char *)map + i / 8, 0xff, npages / 8);
	else
		for (; npages; ++i, --npages)
			set_bit_le(i, map);
}

static inline u64 sanitize_msr(u64 msr)
{
	msr &= ~MSR_HV;
	msr |= MSR_ME;
	return msr;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
{
	vcpu->arch.regs.ccr = vcpu->arch.cr_tm;
	vcpu->arch.regs.xer = vcpu->arch.xer_tm;
	vcpu->arch.regs.link = vcpu->arch.lr_tm;
	vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
	vcpu->arch.amr = vcpu->arch.amr_tm;
	vcpu->arch.ppr = vcpu->arch.ppr_tm;
	vcpu->arch.dscr = vcpu->arch.dscr_tm;
	vcpu->arch.tar = vcpu->arch.tar_tm;
	memcpy(vcpu->arch.regs.gpr, vcpu->arch.gpr_tm,
	       sizeof(vcpu->arch.regs.gpr));
	vcpu->arch.fp = vcpu->arch.fp_tm;
	vcpu->arch.vr = vcpu->arch.vr_tm;
	vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
}

static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
{
	vcpu->arch.cr_tm = vcpu->arch.regs.ccr;
	vcpu->arch.xer_tm = vcpu->arch.regs.xer;
	vcpu->arch.lr_tm = vcpu->arch.regs.link;
	vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
	vcpu->arch.amr_tm = vcpu->arch.amr;
	vcpu->arch.ppr_tm = vcpu->arch.ppr;
	vcpu->arch.dscr_tm = vcpu->arch.dscr;
	vcpu->arch.tar_tm = vcpu->arch.tar;
	memcpy(vcpu->arch.gpr_tm, vcpu->arch.regs.gpr,
	       sizeof(vcpu->arch.regs.gpr));
	vcpu->arch.fp_tm = vcpu->arch.fp;
	vcpu->arch.vr_tm = vcpu->arch.vr;
	vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

extern int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
			     unsigned long gpa, unsigned int level,
			     unsigned long mmu_seq, unsigned int lpid,
			     unsigned long *rmapp, struct rmap_nested **n_rmap);
extern void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
				   struct rmap_nested **n_rmap);
extern void kvmhv_update_nest_rmap_rc_list(struct kvm *kvm, unsigned long *rmapp,
					   unsigned long clr, unsigned long set,
					   unsigned long hpa, unsigned long nbytes);
extern void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
					 const struct kvm_memory_slot *memslot,
					 unsigned long gpa, unsigned long hpa,
					 unsigned long nbytes);

#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#endif /* __ASM_KVM_BOOK3S_64_H__ */