// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2007, 2011
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/sysctl.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/page-states.h>

static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, int nodat)
{
	unsigned long opt, asce;

	if (MACHINE_HAS_TLB_GUEST) {
		opt = 0;
		asce = READ_ONCE(mm->context.gmap_asce);
		if (asce == 0UL || nodat)
			opt |= IPTE_NODAT;
		if (asce != -1UL) {
			asce = asce ? : mm->context.asce;
			opt |= IPTE_GUEST_ASCE;
		}
		__ptep_ipte(addr, ptep, opt, asce, IPTE_LOCAL);
	} else {
		__ptep_ipte(addr, ptep, 0, 0, IPTE_LOCAL);
	}
}

static inline void ptep_ipte_global(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep, int nodat)
{
	unsigned long opt, asce;

	if (MACHINE_HAS_TLB_GUEST) {
		opt = 0;
		asce = READ_ONCE(mm->context.gmap_asce);
		if (asce == 0UL || nodat)
			opt |= IPTE_NODAT;
		if (asce != -1UL) {
			asce = asce ? : mm->context.asce;
			opt |= IPTE_GUEST_ASCE;
		}
		__ptep_ipte(addr, ptep, opt, asce, IPTE_GLOBAL);
	} else {
		__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
	}
}

static inline pte_t ptep_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep,
				      int nodat)
{
	pte_t old;

	old = *ptep;
	if (unlikely(pte_val(old) & _PAGE_INVALID))
		return old;
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		ptep_ipte_local(mm, addr, ptep, nodat);
	else
		ptep_ipte_global(mm, addr, ptep, nodat);
	atomic_dec(&mm->context.flush_count);
	return old;
}

static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
				    unsigned long addr, pte_t *ptep,
				    int nodat)
{
	pte_t old;

	old = *ptep;
	if (unlikely(pte_val(old) & _PAGE_INVALID))
		return old;
	atomic_inc(&mm->context.flush_count);
	if (cpumask_equal(&mm->context.cpu_attach_mask,
			  cpumask_of(smp_processor_id()))) {
		pte_val(*ptep) |= _PAGE_INVALID;
		mm->context.flush_mm = 1;
	} else
		ptep_ipte_global(mm, addr, ptep, nodat);
	atomic_dec(&mm->context.flush_count);
	return old;
}

static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear PCL bit in old */
		"	oihh	%1,0x0080\n"	/* set PCL bit in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
	return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
		: "cc", "memory");
#endif
}
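/*
 * pgste_get_lock() and pgste_set_unlock() implement a one-bit spinlock
 * in the PGSTE itself: the PCL bit is taken with compare-and-swap and
 * released again by the final store.  A minimal usage sketch, assuming
 * a mapped pte in an mm with PGSTEs:
 *
 *	pgste_t pgste;
 *
 *	pgste = pgste_get_lock(ptep);	// spin until the PCL bit is ours
 *	// ... read or modify the PGSTE and/or *ptep ...
 *	pgste_set_unlock(ptep, pgste);	// store back and clear PCL
 *
 * Every PGSTE update in this file follows this pattern.
 */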
static inline pgste_t pgste_get(pte_t *ptep)
{
	unsigned long pgste = 0;
#ifdef CONFIG_PGSTE
	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
#endif
	return __pgste(pgste);
}

static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}

static inline pgste_t pgste_update_all(pte_t pte, pgste_t pgste,
				       struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits, skey;

	if (!mm_uses_skeys(mm) || pte_val(pte) & _PAGE_INVALID)
		return pgste;
	address = pte_val(pte) & PAGE_MASK;
	skey = (unsigned long) page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
	return pgste;
}

static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
				 struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long nkey;

	if (!mm_uses_skeys(mm) || pte_val(entry) & _PAGE_INVALID)
		return;
	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
	address = pte_val(entry) & PAGE_MASK;
	/*
	 * Set page access key and fetch protection bit from pgste.
	 * The guest C/R information is still in the PGSTE, set real
	 * key C/R to 0.
	 */
	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	page_set_storage_key(address, nkey, 0);
#endif
}
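/*
 * pgste_update_all() and pgste_set_key() are the two halves of storage
 * key virtualization: the first harvests the ACC/FP bits plus the
 * changed and referenced bits from the real storage key into the PGSTE
 * when a pte is invalidated, the second installs them back into the
 * real key when a pte becomes valid again.  Schematically (a sketch,
 * not a verbatim caller from this file):
 *
 *	pgste = pgste_update_all(old_pte, pgste, mm);	// pte going away
 *	// ... page is unmapped, key state lives in the PGSTE ...
 *	pgste_set_key(ptep, pgste, new_pte, mm);	// pte coming back
 */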
static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
	if ((pte_val(entry) & _PAGE_PRESENT) &&
	    (pte_val(entry) & _PAGE_WRITE) &&
	    !(pte_val(entry) & _PAGE_INVALID)) {
		if (!MACHINE_HAS_ESOP) {
			/*
			 * Without enhanced suppression-on-protection force
			 * the dirty bit on for all writable ptes.
			 */
			pte_val(entry) |= _PAGE_DIRTY;
			pte_val(entry) &= ~_PAGE_PROTECT;
		}
		if (!(pte_val(entry) & _PAGE_PROTECT))
			/* This pte allows write access, set user-dirty */
			pgste_val(pgste) |= PGSTE_UC_BIT;
	}
#endif
	*ptep = entry;
	return pgste;
}

static inline pgste_t pgste_pte_notify(struct mm_struct *mm,
				       unsigned long addr,
				       pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	unsigned long bits;

	bits = pgste_val(pgste) & (PGSTE_IN_BIT | PGSTE_VSIE_BIT);
	if (bits) {
		pgste_val(pgste) ^= bits;
		ptep_notify(mm, addr, ptep, bits);
	}
#endif
	return pgste;
}

static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pgste_t pgste = __pgste(0);

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
	}
	return pgste;
}

static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
				     unsigned long addr, pte_t *ptep,
				     pgste_t pgste, pte_t old, pte_t new)
{
	if (mm_has_pgste(mm)) {
		if (pte_val(old) & _PAGE_INVALID)
			pgste_set_key(ptep, pgste, new, mm);
		if (pte_val(new) & _PAGE_INVALID) {
			pgste = pgste_update_all(old, pgste, mm);
			if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
			    _PGSTE_GPS_USAGE_UNUSED)
				pte_val(old) |= _PAGE_UNUSED;
		}
		pgste = pgste_set_pte(ptep, pgste, new);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = new;
	}
	return old;
}

pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pte_t *ptep, pte_t new)
{
	pgste_t pgste;
	pte_t old;
	int nodat;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
	old = ptep_flush_direct(mm, addr, ptep, nodat);
	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(ptep_xchg_direct);

pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t new)
{
	pgste_t pgste;
	pte_t old;
	int nodat;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
	old = ptep_flush_lazy(mm, addr, ptep, nodat);
	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(ptep_xchg_lazy);

pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep)
{
	pgste_t pgste;
	pte_t old;
	int nodat;
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
	old = ptep_flush_lazy(mm, addr, ptep, nodat);
	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(old, pgste, mm);
		pgste_set(ptep, pgste);
	}
	return old;
}

void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep, pte_t old_pte, pte_t pte)
{
	pgste_t pgste;
	struct mm_struct *mm = vma->vm_mm;

	if (!MACHINE_HAS_NX)
		pte_val(pte) &= ~_PAGE_NOEXEC;
	if (mm_has_pgste(mm)) {
		pgste = pgste_get(ptep);
		pgste_set_key(ptep, pgste, pte, mm);
		pgste = pgste_set_pte(ptep, pgste, pte);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = pte;
	}
	preempt_enable();
}
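/*
 * ptep_modify_prot_start() and ptep_modify_prot_commit() bracket a
 * read-modify-write of a pte: preemption stays disabled and, for mms
 * with PGSTEs, the PGSTE lock stays held between the two calls.  A
 * typical caller (a sketch of the generic protection-change pattern,
 * not code from this file) looks like:
 *
 *	pte_t old, new;
 *
 *	old = ptep_modify_prot_start(vma, addr, ptep);
 *	new = pte_wrprotect(old);		// or any other prot change
 *	ptep_modify_prot_commit(vma, addr, ptep, old, new);
 */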
static inline void pmdp_idte_local(struct mm_struct *mm,
				   unsigned long addr, pmd_t *pmdp)
{
	if (MACHINE_HAS_TLB_GUEST)
		__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
			    mm->context.asce, IDTE_LOCAL);
	else
		__pmdp_idte(addr, pmdp, 0, 0, IDTE_LOCAL);
	if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
		gmap_pmdp_idte_local(mm, addr);
}

static inline void pmdp_idte_global(struct mm_struct *mm,
				    unsigned long addr, pmd_t *pmdp)
{
	if (MACHINE_HAS_TLB_GUEST) {
		__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
			    mm->context.asce, IDTE_GLOBAL);
		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
			gmap_pmdp_idte_global(mm, addr);
	} else if (MACHINE_HAS_IDTE) {
		__pmdp_idte(addr, pmdp, 0, 0, IDTE_GLOBAL);
		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
			gmap_pmdp_idte_global(mm, addr);
	} else {
		__pmdp_csp(pmdp);
		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
			gmap_pmdp_csp(mm, addr);
	}
}

static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t old;

	old = *pmdp;
	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
		return old;
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		pmdp_idte_local(mm, addr, pmdp);
	else
		pmdp_idte_global(mm, addr, pmdp);
	atomic_dec(&mm->context.flush_count);
	return old;
}

static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t old;

	old = *pmdp;
	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
		return old;
	atomic_inc(&mm->context.flush_count);
	if (cpumask_equal(&mm->context.cpu_attach_mask,
			  cpumask_of(smp_processor_id()))) {
		pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
		mm->context.flush_mm = 1;
		if (mm_has_pgste(mm))
			gmap_pmdp_invalidate(mm, addr);
	} else {
		pmdp_idte_global(mm, addr, pmdp);
	}
	atomic_dec(&mm->context.flush_count);
	return old;
}

#ifdef CONFIG_PGSTE
static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;
	pmd = pmd_alloc(mm, pud, addr);
	return pmd;
}
#endif

pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t new)
{
	pmd_t old;

	preempt_disable();
	old = pmdp_flush_direct(mm, addr, pmdp);
	*pmdp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pmdp_xchg_direct);

pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
		     pmd_t *pmdp, pmd_t new)
{
	pmd_t old;

	preempt_disable();
	old = pmdp_flush_lazy(mm, addr, pmdp);
	*pmdp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pmdp_xchg_lazy);
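/*
 * The "lazy" flush variants exploit the case where only the local CPU
 * has the mm attached: instead of issuing an IDTE they merely mark the
 * entry invalid and set mm->context.flush_mm, deferring the real TLB
 * flush to the next context switch.  Exchanging a pmd therefore comes
 * in two flavours (sketch):
 *
 *	old = pmdp_xchg_direct(mm, addr, pmdp, new);	// flush now
 *	old = pmdp_xchg_lazy(mm, addr, pmdp, new);	// flush may be
 *							// deferred
 */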
static inline void pudp_idte_local(struct mm_struct *mm,
				   unsigned long addr, pud_t *pudp)
{
	if (MACHINE_HAS_TLB_GUEST)
		__pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE,
			    mm->context.asce, IDTE_LOCAL);
	else
		__pudp_idte(addr, pudp, 0, 0, IDTE_LOCAL);
}

static inline void pudp_idte_global(struct mm_struct *mm,
				    unsigned long addr, pud_t *pudp)
{
	if (MACHINE_HAS_TLB_GUEST)
		__pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE,
			    mm->context.asce, IDTE_GLOBAL);
	else if (MACHINE_HAS_IDTE)
		__pudp_idte(addr, pudp, 0, 0, IDTE_GLOBAL);
	else
		/*
		 * The invalid bit position is the same for pmd and pud,
		 * so we can re-use __pmdp_csp() here.
		 */
		__pmdp_csp((pmd_t *) pudp);
}

static inline pud_t pudp_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pud_t *pudp)
{
	pud_t old;

	old = *pudp;
	if (pud_val(old) & _REGION_ENTRY_INVALID)
		return old;
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		pudp_idte_local(mm, addr, pudp);
	else
		pudp_idte_global(mm, addr, pudp);
	atomic_dec(&mm->context.flush_count);
	return old;
}

pud_t pudp_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pud_t *pudp, pud_t new)
{
	pud_t old;

	preempt_disable();
	old = pudp_flush_direct(mm, addr, pudp);
	*pudp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pudp_xchg_direct);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;
	pte_t *ptep;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	pte_val(*ptep) = _PAGE_INVALID;
	ptep++;
	pte_val(*ptep) = _PAGE_INVALID;
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_PGSTE
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	/* the mm_has_pgste() check is done in set_pte_at() */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
	pgste_set_key(ptep, pgste, entry, mm);
	pgste = pgste_set_pte(ptep, pgste, entry);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;

	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) |= PGSTE_IN_BIT;
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}
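/*
 * ptep_set_notify() arms the invalidation-notification bit: the next
 * ptep_xchg_*() on this pte will find PGSTE_IN_BIT set in
 * pgste_pte_notify() and call ptep_notify(), so the gmap code can fix
 * up the corresponding guest mapping.  Sketch of the round trip:
 *
 *	ptep_set_notify(mm, addr, ptep);	// arm the notifier
 *	// ...
 *	ptep_xchg_direct(mm, addr, ptep, new);	// fires ptep_notify()
 */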
/**
 * ptep_force_prot - change access rights of a locked pte
 * @mm: pointer to the process mm_struct
 * @addr: virtual address in the guest address space
 * @ptep: pointer to the page table entry
 * @prot: indicates guest access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bit: pgste bit to set (e.g. for notification)
 *
 * Returns 0 if the access rights were changed and -EAGAIN if the current
 * and requested access rights are incompatible.
 */
int ptep_force_prot(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, int prot, unsigned long bit)
{
	pte_t entry;
	pgste_t pgste;
	int pte_i, pte_p, nodat;

	pgste = pgste_get_lock(ptep);
	entry = *ptep;
	/* Check pte entry after all locks have been acquired */
	pte_i = pte_val(entry) & _PAGE_INVALID;
	pte_p = pte_val(entry) & _PAGE_PROTECT;
	if ((pte_i && (prot != PROT_NONE)) ||
	    (pte_p && (prot & PROT_WRITE))) {
		pgste_set_unlock(ptep, pgste);
		return -EAGAIN;
	}
	/* Change access rights and set pgste bit */
	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
	if (prot == PROT_NONE && !pte_i) {
		ptep_flush_direct(mm, addr, ptep, nodat);
		pgste = pgste_update_all(entry, pgste, mm);
		pte_val(entry) |= _PAGE_INVALID;
	}
	if (prot == PROT_READ && !pte_p) {
		ptep_flush_direct(mm, addr, ptep, nodat);
		pte_val(entry) &= ~_PAGE_INVALID;
		pte_val(entry) |= _PAGE_PROTECT;
	}
	pgste_val(pgste) |= bit;
	pgste = pgste_set_pte(ptep, pgste, entry);
	pgste_set_unlock(ptep, pgste);
	return 0;
}

int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte)
{
	pgste_t spgste, tpgste;
	pte_t spte, tpte;
	int rc = -EAGAIN;

	if (!(pte_val(*tptep) & _PAGE_INVALID))
		return 0;	/* already shadowed */
	spgste = pgste_get_lock(sptep);
	spte = *sptep;
	if (!(pte_val(spte) & _PAGE_INVALID) &&
	    !((pte_val(spte) & _PAGE_PROTECT) &&
	      !(pte_val(pte) & _PAGE_PROTECT))) {
		pgste_val(spgste) |= PGSTE_VSIE_BIT;
		tpgste = pgste_get_lock(tptep);
		pte_val(tpte) = (pte_val(spte) & PAGE_MASK) |
				(pte_val(pte) & _PAGE_PROTECT);
		/* don't touch the storage key - it belongs to parent pgste */
		tpgste = pgste_set_pte(tptep, tpgste, tpte);
		pgste_set_unlock(tptep, tpgste);
		rc = 1;
	}
	pgste_set_unlock(sptep, spgste);
	return rc;
}

void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep)
{
	pgste_t pgste;
	int nodat;

	pgste = pgste_get_lock(ptep);
	/* notifier is called by the caller */
	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
	ptep_flush_direct(mm, saddr, ptep, nodat);
	/* don't touch the storage key - it belongs to parent pgste */
	pgste = pgste_set_pte(ptep, pgste, __pte(_PAGE_INVALID));
	pgste_set_unlock(ptep, pgste);
}
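/*
 * ptep_shadow_pte() return values: 0 means the target pte was already
 * valid (already shadowed), 1 means a shadow pte was created, and
 * -EAGAIN means the source pte does not currently allow the requested
 * access.  A vSIE caller would loop roughly like this (sketch only;
 * the real gmap caller resolves the fault between attempts):
 *
 *	do {
 *		rc = ptep_shadow_pte(mm, saddr, sptep, tptep, pte);
 *		// on -EAGAIN: fault in / unprotect the source page
 *	} while (rc == -EAGAIN);
 */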
static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
{
	if (!non_swap_entry(entry))
		dec_mm_counter(mm, MM_SWAPENTS);
	else if (is_migration_entry(entry)) {
		struct page *page = migration_entry_to_page(entry);

		dec_mm_counter(mm, mm_counter(page));
	}
	free_swap_and_cache(entry);
}

void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset)
{
	unsigned long pgstev;
	pgste_t pgste;
	pte_t pte;

	/* Zap unused and logically-zero pages */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgstev = pgste_val(pgste);
	pte = *ptep;
	if (!reset && pte_swap(pte) &&
	    ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
	     (pgstev & _PGSTE_GPS_ZERO))) {
		ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
		pte_clear(mm, addr, ptep);
	}
	if (reset)
		pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	unsigned long ptev;
	pgste_t pgste;

	/* Clear storage key ACC and F, but set R/C */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT;
	ptev = pte_val(*ptep);
	if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
		page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

/*
 * Test and reset if a guest page is dirty
 */
bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;
	bool dirty;
	int nodat;

	pgste = pgste_get_lock(ptep);
	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
	pgste_val(pgste) &= ~PGSTE_UC_BIT;
	pte = *ptep;
	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
		nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
		ptep_ipte_global(mm, addr, ptep, nodat);
		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
			pte_val(pte) |= _PAGE_PROTECT;
		else
			pte_val(pte) |= _PAGE_INVALID;
		*ptep = pte;
	}
	pgste_set_unlock(ptep, pgste);
	return dirty;
}
EXPORT_SYMBOL_GPL(ptep_test_and_clear_uc);

int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq)
{
	unsigned long keyul, paddr;
	spinlock_t *ptl;
	pgste_t old, new;
	pmd_t *pmdp;
	pte_t *ptep;

	pmdp = pmd_alloc_map(mm, addr);
	if (unlikely(!pmdp))
		return -EFAULT;

	ptl = pmd_lock(mm, pmdp);
	if (!pmd_present(*pmdp)) {
		spin_unlock(ptl);
		return -EFAULT;
	}

	if (pmd_large(*pmdp)) {
		paddr = pmd_val(*pmdp) & HPAGE_MASK;
		paddr |= addr & ~HPAGE_MASK;
		/*
		 * Huge pmds need quiescing operations; they are
		 * always mapped.
		 */
		page_set_storage_key(paddr, key, 1);
		spin_unlock(ptl);
		return 0;
	}
	spin_unlock(ptl);

	ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;

	new = old = pgste_get_lock(ptep);
	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
	keyul = (unsigned long) key;
	pgste_val(new) |= (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
	pgste_val(new) |= (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		unsigned long bits, skey;

		paddr = pte_val(*ptep) & PAGE_MASK;
		skey = (unsigned long) page_get_storage_key(paddr);
		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
		/* Set storage key ACC and FP */
		page_set_storage_key(paddr, skey, !nq);
		/* Merge host changed & referenced into pgste */
		pgste_val(new) |= bits << 52;
	}
	/* changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) &
	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
		pgste_val(new) |= PGSTE_UC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(set_guest_storage_key);
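/*
 * set_guest_storage_key() and get_guest_storage_key() emulate the
 * SSKE/ISKE storage key instructions on behalf of a KVM guest.  A
 * minimal round trip, assuming addr is a mapped guest page (sketch;
 * the key value is an arbitrary example with ACC=6):
 *
 *	unsigned char key;
 *
 *	if (set_guest_storage_key(mm, addr, 0x60, false))
 *		return -EFAULT;
 *	if (get_guest_storage_key(mm, addr, &key))
 *		return -EFAULT;
 */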
/**
 * cond_set_guest_storage_key - conditionally set a guest storage key
 * (handling csske).
 * oldkey will be updated when either mr or mc is set and a pointer is given.
 *
 * Returns 0 if a guest's storage key update wasn't necessary, 1 if the guest
 * storage key was updated and -EFAULT on access errors.
 */
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc)
{
	unsigned char tmp, mask = _PAGE_ACC_BITS | _PAGE_FP_BIT;
	int rc;

	/* we can drop the pgste lock between getting and setting the key */
	if (mr | mc) {
		rc = get_guest_storage_key(current->mm, addr, &tmp);
		if (rc)
			return rc;
		if (oldkey)
			*oldkey = tmp;
		if (!mr)
			mask |= _PAGE_REFERENCED;
		if (!mc)
			mask |= _PAGE_CHANGED;
		if (!((tmp ^ key) & mask))
			return 0;
	}
	rc = set_guest_storage_key(current->mm, addr, key, nq);
	return rc < 0 ? rc : 1;
}
EXPORT_SYMBOL(cond_set_guest_storage_key);

/**
 * reset_guest_reference_bit - reset a guest reference bit (rrbe),
 * returning the reference and changed bit.
 *
 * Returns < 0 in case of error, otherwise the cc to be reported to the guest.
 */
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	unsigned long paddr;
	pgste_t old, new;
	pmd_t *pmdp;
	pte_t *ptep;
	int cc = 0;

	pmdp = pmd_alloc_map(mm, addr);
	if (unlikely(!pmdp))
		return -EFAULT;

	ptl = pmd_lock(mm, pmdp);
	if (!pmd_present(*pmdp)) {
		spin_unlock(ptl);
		return -EFAULT;
	}

	if (pmd_large(*pmdp)) {
		paddr = pmd_val(*pmdp) & HPAGE_MASK;
		paddr |= addr & ~HPAGE_MASK;
		cc = page_reset_referenced(paddr);
		spin_unlock(ptl);
		return cc;
	}
	spin_unlock(ptl);

	ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;

	new = old = pgste_get_lock(ptep);
	/* Reset guest reference bit only */
	pgste_val(new) &= ~PGSTE_GR_BIT;

	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		paddr = pte_val(*ptep) & PAGE_MASK;
		cc = page_reset_referenced(paddr);
		/* Merge real referenced bit into host-set */
		pgste_val(new) |= ((unsigned long) cc << 53) & PGSTE_HR_BIT;
	}
	/* Reflect guest's logical view, not physical */
	cc |= (pgste_val(old) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 49;
	/* Changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) & PGSTE_GR_BIT)
		pgste_val(new) |= PGSTE_UC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	return cc;
}
EXPORT_SYMBOL(reset_guest_reference_bit);
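/*
 * reset_guest_reference_bit() returns the condition code of RRBE as
 * seen by the guest: bit 1 (value 2) is the guest referenced bit and
 * bit 0 (value 1) the guest changed bit, both taken from the PGSTE
 * before the reset.  How a caller would interpret it (sketch):
 *
 *	cc = reset_guest_reference_bit(mm, addr);
 *	if (cc < 0)
 *		return cc;		// -EFAULT
 *	referenced = cc & 2;
 *	changed = cc & 1;		// the changed bit is not reset
 */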
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key)
{
	unsigned long paddr;
	spinlock_t *ptl;
	pgste_t pgste;
	pmd_t *pmdp;
	pte_t *ptep;

	pmdp = pmd_alloc_map(mm, addr);
	if (unlikely(!pmdp))
		return -EFAULT;

	ptl = pmd_lock(mm, pmdp);
	if (!pmd_present(*pmdp)) {
		/* Not yet mapped memory has a zero key */
		spin_unlock(ptl);
		*key = 0;
		return 0;
	}

	if (pmd_large(*pmdp)) {
		paddr = pmd_val(*pmdp) & HPAGE_MASK;
		paddr |= addr & ~HPAGE_MASK;
		*key = page_get_storage_key(paddr);
		spin_unlock(ptl);
		return 0;
	}
	spin_unlock(ptl);

	ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;

	pgste = pgste_get_lock(ptep);
	*key = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	paddr = pte_val(*ptep) & PAGE_MASK;
	if (!(pte_val(*ptep) & _PAGE_INVALID))
		*key = page_get_storage_key(paddr);
	/* Reflect guest's logical view, not physical */
	*key |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	pgste_set_unlock(ptep, pgste);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(get_guest_storage_key);

/**
 * pgste_perform_essa - perform ESSA actions on the PGSTE.
 * @mm: the memory context. It must have PGSTEs, no check is performed here!
 * @hva: the host virtual address of the page whose PGSTE is to be processed
 * @orc: the specific action to perform, see the ESSA_SET_* macros.
 * @oldpte: the PTE will be saved there if the pointer is not NULL.
 * @oldpgste: the old PGSTE will be saved there if the pointer is not NULL.
 *
 * Return: 1 if the page is to be added to the CBRL, otherwise 0,
 *	   or < 0 in case of error. -EINVAL is returned for invalid values
 *	   of orc, -EFAULT for invalid addresses.
 */
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
		       unsigned long *oldpte, unsigned long *oldpgste)
{
	unsigned long pgstev;
	spinlock_t *ptl;
	pgste_t pgste;
	pte_t *ptep;
	int res = 0;

	WARN_ON_ONCE(orc > ESSA_MAX);
	if (unlikely(orc > ESSA_MAX))
		return -EINVAL;
	ptep = get_locked_pte(mm, hva, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;
	pgste = pgste_get_lock(ptep);
	pgstev = pgste_val(pgste);
	if (oldpte)
		*oldpte = pte_val(*ptep);
	if (oldpgste)
		*oldpgste = pgstev;

	switch (orc) {
	case ESSA_GET_STATE:
		break;
	case ESSA_SET_STABLE:
		pgstev &= ~(_PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT);
		pgstev |= _PGSTE_GPS_USAGE_STABLE;
		break;
	case ESSA_SET_UNUSED:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		pgstev |= _PGSTE_GPS_USAGE_UNUSED;
		if (pte_val(*ptep) & _PAGE_INVALID)
			res = 1;
		break;
	case ESSA_SET_VOLATILE:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
		if (pte_val(*ptep) & _PAGE_INVALID)
			res = 1;
		break;
	case ESSA_SET_POT_VOLATILE:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		if (!(pte_val(*ptep) & _PAGE_INVALID)) {
			pgstev |= _PGSTE_GPS_USAGE_POT_VOLATILE;
			break;
		}
		if (pgstev & _PGSTE_GPS_ZERO) {
			pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
			break;
		}
		if (!(pgstev & PGSTE_GC_BIT)) {
			pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
			res = 1;
			break;
		}
		break;
	case ESSA_SET_STABLE_RESIDENT:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		pgstev |= _PGSTE_GPS_USAGE_STABLE;
		/*
		 * Since the resident state can go away any time after this
		 * call, we will not make this page resident. We can revisit
		 * this decision if a guest will ever start using this.
		 */
		break;
	case ESSA_SET_STABLE_IF_RESIDENT:
		if (!(pte_val(*ptep) & _PAGE_INVALID)) {
			pgstev &= ~_PGSTE_GPS_USAGE_MASK;
			pgstev |= _PGSTE_GPS_USAGE_STABLE;
		}
		break;
	case ESSA_SET_STABLE_NODAT:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		pgstev |= _PGSTE_GPS_USAGE_STABLE | _PGSTE_GPS_NODAT;
		break;
	default:
		/* we should never get here! */
		break;
	}
	/* If we are discarding a page, set it to logical zero */
	if (res)
		pgstev |= _PGSTE_GPS_ZERO;

	pgste_val(pgste) = pgstev;
	pgste_set_unlock(ptep, pgste);
	pte_unmap_unlock(ptep, ptl);
	return res;
}
EXPORT_SYMBOL(pgste_perform_essa);
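/*
 * pgste_perform_essa() implements the page-state side of the ESSA
 * instruction used for collaborative memory management (CMMA).  An
 * interception handler would call it roughly like this (sketch; error
 * handling trimmed):
 *
 *	res = pgste_perform_essa(mm, hva, ESSA_SET_STABLE, NULL, NULL);
 *	if (res < 0)
 *		return res;
 *	if (res)
 *		// add the page to the CBRL for the guest
 */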
/**
 * set_pgste_bits - set specific PGSTE bits.
 * @mm: the memory context. It must have PGSTEs, no check is performed here!
 * @hva: the host virtual address of the page whose PGSTE is to be processed
 * @bits: a bitmask representing the bits that will be touched
 * @value: the values of the bits to be written. Only the bits in the mask
 *	   will be written.
 *
 * Return: 0 on success, < 0 in case of error.
 */
int set_pgste_bits(struct mm_struct *mm, unsigned long hva,
		   unsigned long bits, unsigned long value)
{
	spinlock_t *ptl;
	pgste_t new;
	pte_t *ptep;

	ptep = get_locked_pte(mm, hva, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;
	new = pgste_get_lock(ptep);

	pgste_val(new) &= ~bits;
	pgste_val(new) |= value & bits;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(set_pgste_bits);

/**
 * get_pgste - get the current PGSTE for the given address.
 * @mm: the memory context. It must have PGSTEs, no check is performed here!
 * @hva: the host virtual address of the page whose PGSTE is to be processed
 * @pgstep: will be written with the current PGSTE for the given address.
 *
 * Return: 0 on success, < 0 in case of error.
 */
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep)
{
	spinlock_t *ptl;
	pte_t *ptep;

	ptep = get_locked_pte(mm, hva, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;
	*pgstep = pgste_val(pgste_get(ptep));
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(get_pgste);
#endif
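/*
 * get_pgste() and set_pgste_bits() allow callers such as KVM's CMMA
 * migration interface to read and update PGSTEs without knowing the
 * locking protocol.  A masked-update sketch:
 *
 *	unsigned long pgstev;
 *
 *	if (get_pgste(mm, hva, &pgstev))
 *		return -EFAULT;
 *	// set the usage state to UNUSED, leave all other bits alone
 *	if (set_pgste_bits(mm, hva, _PGSTE_GPS_USAGE_MASK,
 *			   _PGSTE_GPS_USAGE_UNUSED))
 *		return -EFAULT;
 */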