// SPDX-License-Identifier: GPL-2.0
/*
 * KVM guest address space mapping code
 *
 * Copyright IBM Corp. 2007, 2020
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *	      David Hildenbrand <david@redhat.com>
 *	      Janosch Frank <frankja@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/pagewalk.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/pgtable.h>
#include <asm/page-states.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/page.h>
#include <asm/tlb.h>

#define GMAP_SHADOW_FAKE_TABLE 1ULL

static struct page *gmap_alloc_crst(void)
{
	struct page *page;

	page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
	if (!page)
		return NULL;
	__arch_set_page_dat(page_to_virt(page), 1UL << CRST_ALLOC_ORDER);
	return page;
}

/**
 * gmap_alloc - allocate and initialize a guest address space
 * @limit: maximum address of the gmap address space
 *
 * Returns a guest address space structure.
 */
static struct gmap *gmap_alloc(unsigned long limit)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;
	unsigned long etype, atype;

	if (limit < _REGION3_SIZE) {
		limit = _REGION3_SIZE - 1;
		atype = _ASCE_TYPE_SEGMENT;
		etype = _SEGMENT_ENTRY_EMPTY;
	} else if (limit < _REGION2_SIZE) {
		limit = _REGION2_SIZE - 1;
		atype = _ASCE_TYPE_REGION3;
		etype = _REGION3_ENTRY_EMPTY;
	} else if (limit < _REGION1_SIZE) {
		limit = _REGION1_SIZE - 1;
		atype = _ASCE_TYPE_REGION2;
		etype = _REGION2_ENTRY_EMPTY;
	} else {
		limit = -1UL;
		atype = _ASCE_TYPE_REGION1;
		etype = _REGION1_ENTRY_EMPTY;
	}
	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL_ACCOUNT);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	INIT_LIST_HEAD(&gmap->children);
	INIT_LIST_HEAD(&gmap->pt_list);
	INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL_ACCOUNT);
	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC | __GFP_ACCOUNT);
	INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC | __GFP_ACCOUNT);
	spin_lock_init(&gmap->guest_table_lock);
	spin_lock_init(&gmap->shadow_lock);
	refcount_set(&gmap->ref_count, 1);
	page = gmap_alloc_crst();
	if (!page)
		goto out_free;
	page->index = 0;
	list_add(&page->lru, &gmap->crst_list);
	table = page_to_virt(page);
	crst_table_init(table, etype);
	gmap->table = table;
	gmap->asce = atype | _ASCE_TABLE_LENGTH |
		_ASCE_USER_BITS | __pa(table);
	gmap->asce_end = limit;
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}

/**
 * gmap_create - create a guest address space
 * @mm: pointer to the parent mm_struct
 * @limit: maximum size of the gmap address space
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
{
	struct gmap *gmap;
	unsigned long gmap_asce;

	gmap = gmap_alloc(limit);
	if (!gmap)
		return NULL;
	gmap->mm = mm;
	spin_lock(&mm->context.lock);
	list_add_rcu(&gmap->list, &mm->context.gmap_list);
	if (list_is_singular(&mm->context.gmap_list))
		gmap_asce = gmap->asce;
	else
		gmap_asce = -1UL;
	WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
	spin_unlock(&mm->context.lock);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_create);
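
/*
 * Illustrative usage sketch, not part of the original source: a consumer
 * such as KVM typically creates one gmap per guest and tears it down with
 * gmap_remove(), which also drops the initial reference. The limit below
 * is an assumed example value.
 *
 *	struct gmap *gmap;
 *
 *	gmap = gmap_create(current->mm, (1UL << 44) - 1);
 *	if (!gmap)
 *		return -ENOMEM;
 *	...
 *	gmap_remove(gmap);
 */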

static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(gmap->asce);
	else
		__tlb_flush_global();
}

static void gmap_radix_tree_free(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void __rcu **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			radix_tree_delete(root, index);
		}
	} while (nr > 0);
}

static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
{
	struct gmap_rmap *rmap, *rnext, *head;
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void __rcu **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			head = radix_tree_delete(root, index);
			gmap_for_each_rmap_safe(rmap, rnext, head)
				kfree(rmap);
		}
	} while (nr > 0);
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 *
 * No locks required. There are no references to this gmap anymore.
 */
static void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;

	/* Flush tlb of all gmaps (if not already done for shadows) */
	if (!(gmap_is_shadow(gmap) && gmap->removed))
		gmap_flush_tlb(gmap);
	/* Free all segment & region tables. */
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
		__free_pages(page, CRST_ALLOC_ORDER);
	gmap_radix_tree_free(&gmap->guest_to_host);
	gmap_radix_tree_free(&gmap->host_to_guest);

	/* Free additional data for a shadow gmap */
	if (gmap_is_shadow(gmap)) {
		struct ptdesc *ptdesc, *n;

		/* Free all page tables. */
		list_for_each_entry_safe(ptdesc, n, &gmap->pt_list, pt_list)
			page_table_free_pgste(ptdesc);
		gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
		/* Release reference to the parent */
		gmap_put(gmap->parent);
	}

	kfree(gmap);
}

/**
 * gmap_get - increase reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * Returns the gmap pointer
 */
struct gmap *gmap_get(struct gmap *gmap)
{
	refcount_inc(&gmap->ref_count);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_get);

/**
 * gmap_put - decrease reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * If the reference counter reaches zero the guest address space is freed.
 */
void gmap_put(struct gmap *gmap)
{
	if (refcount_dec_and_test(&gmap->ref_count))
		gmap_free(gmap);
}
EXPORT_SYMBOL_GPL(gmap_put);

/**
 * gmap_remove - remove a guest address space but do not free it yet
 * @gmap: pointer to the guest address space structure
 */
void gmap_remove(struct gmap *gmap)
{
	struct gmap *sg, *next;
	unsigned long gmap_asce;

	/* Remove all shadow gmaps linked to this gmap */
	if (!list_empty(&gmap->children)) {
		spin_lock(&gmap->shadow_lock);
		list_for_each_entry_safe(sg, next, &gmap->children, list) {
			list_del(&sg->list);
			gmap_put(sg);
		}
		spin_unlock(&gmap->shadow_lock);
	}
	/* Remove gmap from the per-mm list */
	spin_lock(&gmap->mm->context.lock);
	list_del_rcu(&gmap->list);
	if (list_empty(&gmap->mm->context.gmap_list))
		gmap_asce = 0;
	else if (list_is_singular(&gmap->mm->context.gmap_list))
		gmap_asce = list_first_entry(&gmap->mm->context.gmap_list,
					     struct gmap, list)->asce;
	else
		gmap_asce = -1UL;
	WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce);
	spin_unlock(&gmap->mm->context.lock);
	synchronize_rcu();
	/* Put reference */
	gmap_put(gmap);
}
EXPORT_SYMBOL_GPL(gmap_remove);

/*
 * gmap_alloc_table is assumed to be called with mmap_lock held
 */
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
			    unsigned long init, unsigned long gaddr)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	page = gmap_alloc_crst();
	if (!page)
		return -ENOMEM;
	new = page_to_virt(page);
	crst_table_init(new, init);
	spin_lock(&gmap->guest_table_lock);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = __pa(new) | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
		page->index = gaddr;
		page = NULL;
	}
	spin_unlock(&gmap->guest_table_lock);
	if (page)
		__free_pages(page, CRST_ALLOC_ORDER);
	return 0;
}

/**
 * __gmap_segment_gaddr - find virtual address from segment pointer
 * @entry: pointer to a segment table entry in the guest address space
 *
 * Returns the virtual address in the guest address space for the segment
 */
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
	struct page *page;
	unsigned long offset;

	offset = (unsigned long) entry / sizeof(unsigned long);
	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
	page = pmd_pgtable_page((pmd_t *) entry);
	return page->index + offset;
}

/**
 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
 * @gmap: pointer to the guest address space structure
 * @vmaddr: address in the host process address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
{
	unsigned long *entry;
	int flush = 0;

	BUG_ON(gmap_is_shadow(gmap));
	spin_lock(&gmap->guest_table_lock);
	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
	if (entry) {
		flush = (*entry != _SEGMENT_ENTRY_EMPTY);
		*entry = _SEGMENT_ENTRY_EMPTY;
	}
	spin_unlock(&gmap->guest_table_lock);
	return flush;
}

/**
 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
 * @gmap: pointer to the guest address space structure
 * @gaddr: address in the guest address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	mmap_write_lock(gmap->mm);
	for (off = 0; off < len; off += PMD_SIZE)
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
	mmap_write_unlock(gmap->mm);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len < from || to + len < to ||
	    from + len - 1 > TASK_SIZE_MAX || to + len - 1 > gmap->asce_end)
		return -EINVAL;

	flush = 0;
	mmap_write_lock(gmap->mm);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Remove old translation */
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
		/* Store new translation */
		if (radix_tree_insert(&gmap->guest_to_host,
				      (to + off) >> PMD_SHIFT,
				      (void *) from + off))
			break;
	}
	mmap_write_unlock(gmap->mm);
	if (flush)
		gmap_flush_tlb(gmap);
	if (off >= len)
		return 0;
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
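
/*
 * Illustrative usage sketch, not part of the original source: @from, @to
 * and @len must all be aligned to the segment size (PMD_SIZE). The
 * addresses below are assumed example values.
 *
 *	rc = gmap_map_segment(gmap, 0x10000000UL, 0x80000000UL, 0x1000000UL);
 *	if (rc)
 *		return rc;
 *	...
 *	gmap_unmap_segment(gmap, 0x80000000UL, 0x1000000UL);
 *
 * A non-zero rc is -EINVAL for a misaligned or out-of-range request and
 * -ENOMEM if the guest_to_host radix tree could not be populated.
 */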

/**
 * __gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_lock of the mm that belongs to the address space must be held
 * when this function gets called.
 *
 * Note: Can also be called for shadow gmaps.
 */
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long)
		radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
	/* Note: guest_to_host is empty for a shadow gmap */
	return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);

/**
 * gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long rc;

	mmap_read_lock(gmap->mm);
	rc = __gmap_translate(gmap, gaddr);
	mmap_read_unlock(gmap->mm);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);

/**
 * gmap_unlink - disconnect a page table from the gmap shadow tables
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the host page table
 * @vmaddr: vm address associated with the host page table
 */
void gmap_unlink(struct mm_struct *mm, unsigned long *table,
		 unsigned long vmaddr)
{
	struct gmap *gmap;
	int flush;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
		if (flush)
			gmap_flush_tlb(gmap);
	}
	rcu_read_unlock();
}

static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *old, pmd_t new,
			   unsigned long gaddr);

/**
 * __gmap_link - set up shadow page tables to connect a host to a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @vmaddr: vm address
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 * The mmap_lock of the mm that belongs to the address space must be held
 * when this function gets called.
 */
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
{
	struct mm_struct *mm;
	unsigned long *table;
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	u64 unprot;
	int rc;

	BUG_ON(gmap_is_shadow(gmap));
	/* Create higher level tables in the gmap page table */
	table = gmap->table;
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
		table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
				     gaddr & _REGION1_MASK))
			return -ENOMEM;
		table = __va(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
		table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
				     gaddr & _REGION2_MASK))
			return -ENOMEM;
		table = __va(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
		table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
				     gaddr & _REGION3_MASK))
			return -ENOMEM;
		table = __va(*table & _REGION_ENTRY_ORIGIN);
	}
	table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	/* Walk the parent mm page table */
	mm = gmap->mm;
	pgd = pgd_offset(mm, vmaddr);
	VM_BUG_ON(pgd_none(*pgd));
	p4d = p4d_offset(pgd, vmaddr);
	VM_BUG_ON(p4d_none(*p4d));
	pud = pud_offset(p4d, vmaddr);
	VM_BUG_ON(pud_none(*pud));
	/* large puds cannot yet be handled */
	if (pud_leaf(*pud))
		return -EFAULT;
	pmd = pmd_offset(pud, vmaddr);
	VM_BUG_ON(pmd_none(*pmd));
	/* Are we allowed to use huge pages? */
	if (pmd_leaf(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
		return -EFAULT;
	/* Link gmap segment table entry location to page table. */
	rc = radix_tree_preload(GFP_KERNEL_ACCOUNT);
	if (rc)
		return rc;
	ptl = pmd_lock(mm, pmd);
	spin_lock(&gmap->guest_table_lock);
	if (*table == _SEGMENT_ENTRY_EMPTY) {
		rc = radix_tree_insert(&gmap->host_to_guest,
				       vmaddr >> PMD_SHIFT, table);
		if (!rc) {
			if (pmd_leaf(*pmd)) {
				*table = (pmd_val(*pmd) &
					  _SEGMENT_ENTRY_HARDWARE_BITS_LARGE)
					| _SEGMENT_ENTRY_GMAP_UC
					| _SEGMENT_ENTRY;
			} else
				*table = pmd_val(*pmd) &
					_SEGMENT_ENTRY_HARDWARE_BITS;
		}
	} else if (*table & _SEGMENT_ENTRY_PROTECT &&
		   !(pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT)) {
		unprot = (u64)*table;
		unprot &= ~_SEGMENT_ENTRY_PROTECT;
		unprot |= _SEGMENT_ENTRY_GMAP_UC;
		gmap_pmdp_xchg(gmap, (pmd_t *)table, __pmd(unprot), gaddr);
	}
	spin_unlock(&gmap->guest_table_lock);
	spin_unlock(ptl);
	radix_tree_preload_end();
	return rc;
}

/**
 * fixup_user_fault_nowait - manually resolve a user page fault without waiting
 * @mm: mm_struct of target mm
 * @address: user address
 * @fault_flags: flags to pass down to handle_mm_fault()
 * @unlocked: did we unlock the mmap_lock while retrying
 *
 * This function behaves similarly to fixup_user_fault(), but it guarantees
 * that the fault will be resolved without waiting. The function might drop
 * and re-acquire the mm lock, in which case @unlocked will be set to true.
 *
 * The guarantee is that the fault is handled without waiting, but the
 * function itself might sleep, due to the lock.
 *
 * Context: Needs to be called with mm->mmap_lock held in read mode, and will
 * return with the lock held in read mode; @unlocked will indicate whether
 * the lock has been dropped and re-acquired. This is the same behaviour as
 * fixup_user_fault().
 *
 * Return: 0 on success, -EAGAIN if the fault cannot be resolved without
 * waiting, -EFAULT if the fault cannot be resolved, -ENOMEM if out of
 * memory.
 */
static int fixup_user_fault_nowait(struct mm_struct *mm, unsigned long address,
				   unsigned int fault_flags, bool *unlocked)
{
	struct vm_area_struct *vma;
	unsigned int test_flags;
	vm_fault_t fault;
	int rc;

	fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
	test_flags = fault_flags & FAULT_FLAG_WRITE ? VM_WRITE : VM_READ;

	vma = find_vma(mm, address);
	if (unlikely(!vma || address < vma->vm_start))
		return -EFAULT;
	if (unlikely(!(vma->vm_flags & test_flags)))
		return -EFAULT;

	fault = handle_mm_fault(vma, address, fault_flags, NULL);
	/* the mm lock has been dropped, take it again */
	if (fault & VM_FAULT_COMPLETED) {
		*unlocked = true;
		mmap_read_lock(mm);
		return 0;
	}
	/* the mm lock has not been dropped */
	if (fault & VM_FAULT_ERROR) {
		rc = vm_fault_to_errno(fault, 0);
		BUG_ON(!rc);
		return rc;
	}
	/* the mm lock has not been dropped because of FAULT_FLAG_RETRY_NOWAIT */
	if (fault & VM_FAULT_RETRY)
		return -EAGAIN;
	/* nothing needed to be done and the mm lock has not been dropped */
	return 0;
}

/**
 * __gmap_fault - resolve a fault on a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * Context: Needs to be called with mm->mmap_lock held in read mode. Might
 * drop and re-acquire the lock. Will always return with the lock held.
 */
static int __gmap_fault(struct gmap *gmap, unsigned long gaddr, unsigned int fault_flags)
{
	unsigned long vmaddr;
	bool unlocked;
	int rc = 0;

retry:
	unlocked = false;

	vmaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(vmaddr))
		return vmaddr;

	if (fault_flags & FAULT_FLAG_RETRY_NOWAIT)
		rc = fixup_user_fault_nowait(gmap->mm, vmaddr, fault_flags, &unlocked);
	else
		rc = fixup_user_fault(gmap->mm, vmaddr, fault_flags, &unlocked);
	if (rc)
		return rc;
	/*
	 * In the case that fixup_user_fault unlocked the mmap_lock during
	 * fault-in, redo __gmap_translate() to avoid racing with a
	 * map/unmap_segment.
	 * In particular, __gmap_translate(), fixup_user_fault{,_nowait}(),
	 * and __gmap_link() must all be called atomically in one go; if the
	 * lock had been dropped in between, a retry is needed.
	 */
	if (unlocked)
		goto retry;

	return __gmap_link(gmap, gaddr, vmaddr);
}

/**
 * gmap_fault - resolve a fault on a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, -EFAULT if the
 * vm address is already mapped to a different guest segment, and -EAGAIN if
 * FAULT_FLAG_RETRY_NOWAIT was specified and the fault could not be processed
 * immediately.
 */
int gmap_fault(struct gmap *gmap, unsigned long gaddr, unsigned int fault_flags)
{
	int rc;

	mmap_read_lock(gmap->mm);
	rc = __gmap_fault(gmap, gaddr, fault_flags);
	mmap_read_unlock(gmap->mm);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
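
/*
 * Illustrative usage sketch, not part of the original source: a guest
 * write fault is typically resolved like this; with FAULT_FLAG_RETRY_NOWAIT
 * the caller must additionally be prepared to see -EAGAIN and retry once
 * it may sleep.
 *
 *	rc = gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE);
 *	if (rc == -EFAULT)
 *		(no mapping exists, e.g. inject an addressing exception)
 *	else if (rc)
 *		(pass -ENOMEM or -EAGAIN back to the caller)
 */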

/*
 * this function is assumed to be called with mmap_lock held
 */
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
	struct vm_area_struct *vma;
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;

	/* Find the vm address for the guest address */
	vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	if (vmaddr) {
		vmaddr |= gaddr & ~PMD_MASK;

		vma = vma_lookup(gmap->mm, vmaddr);
		if (!vma || is_vm_hugetlb_page(vma))
			return;

		/* Get pointer to the page table entry */
		ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
		if (likely(ptep)) {
			ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
			pte_unmap_unlock(ptep, ptl);
		}
	}
}
EXPORT_SYMBOL_GPL(__gmap_zap);

void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
{
	unsigned long gaddr, vmaddr, size;
	struct vm_area_struct *vma;

	mmap_read_lock(gmap->mm);
	for (gaddr = from; gaddr < to;
	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
		/* Find the vm address for the guest address */
		vmaddr = (unsigned long)
			radix_tree_lookup(&gmap->guest_to_host,
					  gaddr >> PMD_SHIFT);
		if (!vmaddr)
			continue;
		vmaddr |= gaddr & ~PMD_MASK;
		/* Find vma in the parent mm */
		vma = find_vma(gmap->mm, vmaddr);
		if (!vma)
			continue;
		/*
		 * We do not discard pages that are backed by
		 * hugetlbfs, so we don't have to refault them.
		 */
		if (is_vm_hugetlb_page(vma))
			continue;
		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
		zap_page_range_single(vma, vmaddr, size, NULL);
	}
	mmap_read_unlock(gmap->mm);
}
EXPORT_SYMBOL_GPL(gmap_discard);

static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_pte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add_rcu(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_pte_notifier);

/**
 * gmap_unregister_pte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_rcu(&nb->list);
	spin_unlock(&gmap_notifier_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(gmap_unregister_pte_notifier);
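
/*
 * Illustrative usage sketch, not part of the original source: a consumer
 * registers one global notifier and is called back for every range whose
 * protection was armed with GMAP_NOTIFY_MPROT or GMAP_NOTIFY_SHADOW. The
 * callback name is a made-up example.
 *
 *	static void my_gmap_notifier(struct gmap *gmap, unsigned long start,
 *				     unsigned long end)
 *	{
 *		(react to the invalidation of [start, end])
 *	}
 *
 *	static struct gmap_notifier my_nb = {
 *		.notifier_call = my_gmap_notifier,
 *	};
 *
 *	gmap_register_pte_notifier(&my_nb);
 */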

/**
 * gmap_call_notifier - call all registered invalidation callbacks
 * @gmap: pointer to guest mapping meta data structure
 * @start: start virtual address in the guest address space
 * @end: end virtual address in the guest address space
 */
static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
			       unsigned long end)
{
	struct gmap_notifier *nb;

	list_for_each_entry(nb, &gmap_notifier_list, list)
		nb->notifier_call(gmap, start, end);
}

/**
 * gmap_table_walk - walk the gmap page tables
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @level: page table level to stop at
 *
 * Returns a table entry pointer for the given guest address and @level
 * @level=0 : returns a pointer to a page table entry (or NULL)
 * @level=1 : returns a pointer to a segment table entry (or NULL)
 * @level=2 : returns a pointer to a region-3 table entry (or NULL)
 * @level=3 : returns a pointer to a region-2 table entry (or NULL)
 * @level=4 : returns a pointer to a region-1 table entry (or NULL)
 *
 * Returns NULL if the gmap page tables could not be walked to the
 * requested level.
 *
 * Note: Can also be called for shadow gmaps.
 */
static inline unsigned long *gmap_table_walk(struct gmap *gmap,
					     unsigned long gaddr, int level)
{
	const int asce_type = gmap->asce & _ASCE_TYPE_MASK;
	unsigned long *table = gmap->table;

	if (gmap_is_shadow(gmap) && gmap->removed)
		return NULL;

	if (WARN_ON_ONCE(level > (asce_type >> 2) + 1))
		return NULL;

	if (asce_type != _ASCE_TYPE_REGION1 &&
	    gaddr & (-1UL << (31 + (asce_type >> 2) * 11)))
		return NULL;

	switch (asce_type) {
	case _ASCE_TYPE_REGION1:
		table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (level == 4)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = __va(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION2:
		table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (level == 3)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = __va(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION3:
		table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (level == 2)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = __va(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_SEGMENT:
		table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (level == 1)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = __va(*table & _SEGMENT_ENTRY_ORIGIN);
		table += (gaddr & _PAGE_INDEX) >> PAGE_SHIFT;
	}
	return table;
}

/**
 * gmap_pte_op_walk - walk the gmap page table, get the page table lock
 *		      and return the pte pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @ptl: pointer to the spinlock pointer
 *
 * Returns a pointer to the locked pte for a guest address, or NULL
 */
static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
			       spinlock_t **ptl)
{
	unsigned long *table;

	BUG_ON(gmap_is_shadow(gmap));
	/* Walk the gmap page table, lock and get pte pointer */
	table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
	if (!table || *table & _SEGMENT_ENTRY_INVALID)
		return NULL;
	return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
}

/**
 * gmap_pte_op_fixup - force a page in and connect the gmap page table
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @vmaddr: address in the host process address space
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if the caller can retry __gmap_translate (might fail again),
 * -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
 * up or connecting the gmap page table.
 */
static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
			     unsigned long vmaddr, int prot)
{
	struct mm_struct *mm = gmap->mm;
	unsigned int fault_flags;
	bool unlocked = false;

	BUG_ON(gmap_is_shadow(gmap));
	fault_flags = (prot == PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
	if (fixup_user_fault(mm, vmaddr, fault_flags, &unlocked))
		return -EFAULT;
	if (unlocked)
		/* lost mmap_lock, caller has to retry __gmap_translate */
		return 0;
	/* Connect the page tables */
	return __gmap_link(gmap, gaddr, vmaddr);
}

/**
 * gmap_pte_op_end - release the page table lock
 * @ptep: pointer to the locked pte
 * @ptl: pointer to the page table spinlock
 */
static void gmap_pte_op_end(pte_t *ptep, spinlock_t *ptl)
{
	pte_unmap_unlock(ptep, ptl);
}

/**
 * gmap_pmd_op_walk - walk the gmap tables, get the guest table lock
 *		      and return the pmd pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 *
 * Returns a pointer to the pmd for a guest address, or NULL
 */
static inline pmd_t *gmap_pmd_op_walk(struct gmap *gmap, unsigned long gaddr)
{
	pmd_t *pmdp;

	BUG_ON(gmap_is_shadow(gmap));
	pmdp = (pmd_t *) gmap_table_walk(gmap, gaddr, 1);
	if (!pmdp)
		return NULL;

	/* without huge pages, there is no need to take the table lock */
	if (!gmap->mm->context.allow_gmap_hpage_1m)
		return pmd_none(*pmdp) ? NULL : pmdp;

	spin_lock(&gmap->guest_table_lock);
	if (pmd_none(*pmdp)) {
		spin_unlock(&gmap->guest_table_lock);
		return NULL;
	}

	/* 4k page table entries are locked via the pte (pte_alloc_map_lock). */
	if (!pmd_leaf(*pmdp))
		spin_unlock(&gmap->guest_table_lock);
	return pmdp;
}

/**
 * gmap_pmd_op_end - release the guest_table_lock if needed
 * @gmap: pointer to the guest mapping meta data structure
 * @pmdp: pointer to the pmd
 */
static inline void gmap_pmd_op_end(struct gmap *gmap, pmd_t *pmdp)
{
	if (pmd_leaf(*pmdp))
		spin_unlock(&gmap->guest_table_lock);
}

/*
 * gmap_protect_pmd - remove access rights to memory and set pmd notification bits
 * @pmdp: pointer to the pmd to be protected
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: notification bits to set
 *
 * Returns:
 * 0 if successfully protected
 * -EAGAIN if a fixup is needed
 * -EINVAL if unsupported notifier bits have been specified
 *
 * Expected to be called with sg->mm->mmap_lock in read and
 * guest_table_lock held.
 */
static int gmap_protect_pmd(struct gmap *gmap, unsigned long gaddr,
			    pmd_t *pmdp, int prot, unsigned long bits)
{
	int pmd_i = pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID;
	int pmd_p = pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT;
	pmd_t new = *pmdp;

	/* Fixup needed */
	if ((pmd_i && (prot != PROT_NONE)) || (pmd_p && (prot == PROT_WRITE)))
		return -EAGAIN;

	if (prot == PROT_NONE && !pmd_i) {
		new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_INVALID));
		gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
	}

	if (prot == PROT_READ && !pmd_p) {
		new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_INVALID));
		new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_PROTECT));
		gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
	}

	if (bits & GMAP_NOTIFY_MPROT)
		set_pmd(pmdp, set_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_GMAP_IN)));

	/* Shadow GMAP protection needs split PMDs */
	if (bits & GMAP_NOTIFY_SHADOW)
		return -EINVAL;

	return 0;
}

/*
 * gmap_protect_pte - remove access rights to memory and set pgste bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @pmdp: pointer to the pmd associated with the pte
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: notification bits to set
 *
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EAGAIN if a fixup is needed.
 *
 * Expected to be called with sg->mm->mmap_lock in read
 */
static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
			    pmd_t *pmdp, int prot, unsigned long bits)
{
	int rc;
	pte_t *ptep;
	spinlock_t *ptl;
	unsigned long pbits = 0;

	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
		return -EAGAIN;

	ptep = pte_alloc_map_lock(gmap->mm, pmdp, gaddr, &ptl);
	if (!ptep)
		return -ENOMEM;

	pbits |= (bits & GMAP_NOTIFY_MPROT) ? PGSTE_IN_BIT : 0;
	pbits |= (bits & GMAP_NOTIFY_SHADOW) ? PGSTE_VSIE_BIT : 0;
	/* Protect and unlock. */
	rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, pbits);
	gmap_pte_op_end(ptep, ptl);
	return rc;
}

/*
 * gmap_protect_range - remove access rights to memory and set pgste bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: pgste notification bits to set
 *
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
 *
 * Called with sg->mm->mmap_lock in read.
 */
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
			      unsigned long len, int prot, unsigned long bits)
{
	unsigned long vmaddr, dist;
	pmd_t *pmdp;
	int rc;

	BUG_ON(gmap_is_shadow(gmap));
	while (len) {
		rc = -EAGAIN;
		pmdp = gmap_pmd_op_walk(gmap, gaddr);
		if (pmdp) {
			if (!pmd_leaf(*pmdp)) {
				rc = gmap_protect_pte(gmap, gaddr, pmdp, prot,
						      bits);
				if (!rc) {
					len -= PAGE_SIZE;
					gaddr += PAGE_SIZE;
				}
			} else {
				rc = gmap_protect_pmd(gmap, gaddr, pmdp, prot,
						      bits);
				if (!rc) {
					dist = HPAGE_SIZE - (gaddr & ~HPAGE_MASK);
					len = len < dist ? 0 : len - dist;
					gaddr = (gaddr & HPAGE_MASK) + HPAGE_SIZE;
				}
			}
			gmap_pmd_op_end(gmap, pmdp);
		}
		if (rc) {
			if (rc == -EINVAL)
				return rc;

			/* -EAGAIN, fixup of userspace mm and gmap */
			vmaddr = __gmap_translate(gmap, gaddr);
			if (IS_ERR_VALUE(vmaddr))
				return vmaddr;
			rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, prot);
			if (rc)
				return rc;
		}
	}
	return 0;
}

/**
 * gmap_mprotect_notify - change access rights for a range of ptes and
 *                        call the notifier if any pte changes again
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if for each page in the given range a gmap mapping exists,
 * the new access rights could be set and the notifier could be armed.
 * If the gmap mapping is missing for one or more pages -EFAULT is
 * returned. If no memory could be allocated -ENOMEM is returned.
 * This function establishes missing page table entries.
 */
int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
			 unsigned long len, int prot)
{
	int rc;

	if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK) || gmap_is_shadow(gmap))
		return -EINVAL;
	if (!MACHINE_HAS_ESOP && prot == PROT_READ)
		return -EINVAL;
	mmap_read_lock(gmap->mm);
	rc = gmap_protect_range(gmap, gaddr, len, prot, GMAP_NOTIFY_MPROT);
	mmap_read_unlock(gmap->mm);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
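
/*
 * Illustrative usage sketch, not part of the original source: KVM arms
 * this notification for the guest prefix pages so that it is told before
 * they change; the length of two pages matches the 8 KB prefix area and
 * the variable names are assumed examples.
 *
 *	rc = gmap_mprotect_notify(vcpu_gmap, prefix_gaddr, 2 * PAGE_SIZE,
 *				  PROT_WRITE);
 *	if (rc)
 *		(mapping missing (-EFAULT) or out of memory (-ENOMEM))
 */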

/**
 * gmap_read_table - get an unsigned long value from a guest page table using
 *                   absolute addressing, without marking the page referenced.
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @val: pointer to the unsigned long value to return
 *
 * Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT
 * if reading using the virtual address failed. -EINVAL if called on a gmap
 * shadow.
 *
 * Called with gmap->mm->mmap_lock in read.
 */
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
{
	unsigned long address, vmaddr;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int rc;

	if (gmap_is_shadow(gmap))
		return -EINVAL;

	while (1) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			pte = *ptep;
			if (pte_present(pte) && (pte_val(pte) & _PAGE_READ)) {
				address = pte_val(pte) & PAGE_MASK;
				address += gaddr & ~PAGE_MASK;
				*val = *(unsigned long *)__va(address);
				set_pte(ptep, set_pte_bit(*ptep, __pgprot(_PAGE_YOUNG)));
				/* Do *NOT* clear the _PAGE_INVALID bit! */
				rc = 0;
			}
			gmap_pte_op_end(ptep, ptl);
		}
		if (!rc)
			break;
		vmaddr = __gmap_translate(gmap, gaddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, PROT_READ);
		if (rc)
			break;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_read_table);
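
/*
 * Illustrative usage sketch, not part of the original source: read one
 * 8-byte entry of a guest DAT table through the parent gmap without
 * setting the referenced bit; the variable names are assumed examples.
 *
 *	unsigned long entry;
 *
 *	rc = gmap_read_table(parent, gaddr & ~7UL, &entry);
 *	if (rc)
 *		(-EFAULT: not mapped, -ENOMEM: allocation failure)
 */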

/**
 * gmap_insert_rmap - add a rmap to the host_to_rmap radix tree
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: vm address associated with the rmap
 * @rmap: pointer to the rmap structure
 *
 * Called with the sg->guest_table_lock
 */
static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
				    struct gmap_rmap *rmap)
{
	struct gmap_rmap *temp;
	void __rcu **slot;

	BUG_ON(!gmap_is_shadow(sg));
	slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
	if (slot) {
		rmap->next = radix_tree_deref_slot_protected(slot,
							&sg->guest_table_lock);
		for (temp = rmap->next; temp; temp = temp->next) {
			if (temp->raddr == rmap->raddr) {
				kfree(rmap);
				return;
			}
		}
		radix_tree_replace_slot(&sg->host_to_rmap, slot, rmap);
	} else {
		rmap->next = NULL;
		radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
				  rmap);
	}
}

/**
 * gmap_protect_rmap - restrict access rights to memory (RO) and create an rmap
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow gmap
 * @paddr: address in the parent guest address space
 * @len: length of the memory area to protect
 *
 * Returns 0 if successfully protected and the rmap was created, -ENOMEM
 * if out of memory and -EFAULT if paddr is invalid.
 */
static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
			     unsigned long paddr, unsigned long len)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	while (len) {
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr))
			return vmaddr;
		rmap = kzalloc(sizeof(*rmap), GFP_KERNEL_ACCOUNT);
		if (!rmap)
			return -ENOMEM;
		rmap->raddr = raddr;
		rc = radix_tree_preload(GFP_KERNEL_ACCOUNT);
		if (rc) {
			kfree(rmap);
			return rc;
		}
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (ptep) {
			spin_lock(&sg->guest_table_lock);
			rc = ptep_force_prot(parent->mm, paddr, ptep, PROT_READ,
					     PGSTE_VSIE_BIT);
			if (!rc)
				gmap_insert_rmap(sg, vmaddr, rmap);
			spin_unlock(&sg->guest_table_lock);
			gmap_pte_op_end(ptep, ptl);
		}
		radix_tree_preload_end();
		if (rc) {
			kfree(rmap);
			rc = gmap_pte_op_fixup(parent, paddr, vmaddr, PROT_READ);
			if (rc)
				return rc;
			continue;
		}
		paddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}

#define _SHADOW_RMAP_MASK	0x7
#define _SHADOW_RMAP_REGION1	0x5
#define _SHADOW_RMAP_REGION2	0x4
#define _SHADOW_RMAP_REGION3	0x3
#define _SHADOW_RMAP_SEGMENT	0x2
#define _SHADOW_RMAP_PGTABLE	0x1

/**
 * gmap_idte_one - invalidate a single region or segment table entry
 * @asce: region or segment table *origin* + table-type bits
 * @vaddr: virtual address to identify the table entry to flush
 *
 * The invalid bit of a single region or segment table entry is set
 * and the associated TLB entries depending on the entry are flushed.
 * The table-type of the @asce identifies the portion of the @vaddr
 * that is used as the invalidation index.
 */
static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr)
{
	asm volatile(
		"	idte	%0,0,%1"
		: : "a" (asce), "a" (vaddr) : "cc", "memory");
}

/**
 * gmap_unshadow_page - remove a page from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_page(struct gmap *sg, unsigned long raddr)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */
	if (!table || *table & _PAGE_INVALID)
		return;
	gmap_call_notifier(sg, raddr, raddr + PAGE_SIZE - 1);
	ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
}

/**
 * __gmap_unshadow_pgt - remove all entries from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @pgt: pointer to the start of a shadow page table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
				unsigned long *pgt)
{
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _PAGE_ENTRIES; i++, raddr += PAGE_SIZE)
		pgt[i] = _PAGE_INVALID;
}

/**
 * gmap_unshadow_pgt - remove a shadow page table from a segment entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long *ste;
	phys_addr_t sto, pgt;
	struct ptdesc *ptdesc;

	BUG_ON(!gmap_is_shadow(sg));
	ste = gmap_table_walk(sg, raddr, 1); /* get segment pointer */
	if (!ste || !(*ste & _SEGMENT_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _SEGMENT_SIZE - 1);
	sto = __pa(ste - ((raddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT));
	gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
	pgt = *ste & _SEGMENT_ENTRY_ORIGIN;
	*ste = _SEGMENT_ENTRY_EMPTY;
	__gmap_unshadow_pgt(sg, raddr, __va(pgt));
	/* Free page table */
	ptdesc = page_ptdesc(phys_to_page(pgt));
	list_del(&ptdesc->pt_list);
	page_table_free_pgste(ptdesc);
}

/**
 * __gmap_unshadow_sgt - remove all entries from a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @sgt: pointer to the start of a shadow segment table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
				unsigned long *sgt)
{
	struct ptdesc *ptdesc;
	phys_addr_t pgt;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _SEGMENT_SIZE) {
		if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
			continue;
		pgt = sgt[i] & _REGION_ENTRY_ORIGIN;
		sgt[i] = _SEGMENT_ENTRY_EMPTY;
		__gmap_unshadow_pgt(sg, raddr, __va(pgt));
		/* Free page table */
		ptdesc = page_ptdesc(phys_to_page(pgt));
		list_del(&ptdesc->pt_list);
		page_table_free_pgste(ptdesc);
	}
}

/**
 * gmap_unshadow_sgt - remove a shadow segment table from a region-3 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the shadow->guest_table_lock
 */
static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long r3o, *r3e;
	phys_addr_t sgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */
	if (!r3e || !(*r3e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _REGION3_SIZE - 1);
	r3o = (unsigned long) (r3e - ((raddr & _REGION3_INDEX) >> _REGION3_SHIFT));
	gmap_idte_one(__pa(r3o) | _ASCE_TYPE_REGION3, raddr);
	sgt = *r3e & _REGION_ENTRY_ORIGIN;
	*r3e = _REGION3_ENTRY_EMPTY;
	__gmap_unshadow_sgt(sg, raddr, __va(sgt));
	/* Free segment table */
	page = phys_to_page(sgt);
	list_del(&page->lru);
	__free_pages(page, CRST_ALLOC_ORDER);
}

/**
 * __gmap_unshadow_r3t - remove all entries from a shadow region-3 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 * @r3t: pointer to the start of a shadow region-3 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
				unsigned long *r3t)
{
	struct page *page;
	phys_addr_t sgt;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION3_SIZE) {
		if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		sgt = r3t[i] & _REGION_ENTRY_ORIGIN;
		r3t[i] = _REGION3_ENTRY_EMPTY;
		__gmap_unshadow_sgt(sg, raddr, __va(sgt));
		/* Free segment table */
		page = phys_to_page(sgt);
		list_del(&page->lru);
		__free_pages(page, CRST_ALLOC_ORDER);
	}
}

/**
 * gmap_unshadow_r3t - remove a shadow region-3 table from a region-2 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r2o, *r2e;
	phys_addr_t r3t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */
	if (!r2e || !(*r2e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _REGION2_SIZE - 1);
	r2o = (unsigned long) (r2e - ((raddr & _REGION2_INDEX) >> _REGION2_SHIFT));
	gmap_idte_one(__pa(r2o) | _ASCE_TYPE_REGION2, raddr);
	r3t = *r2e & _REGION_ENTRY_ORIGIN;
	*r2e = _REGION2_ENTRY_EMPTY;
	__gmap_unshadow_r3t(sg, raddr, __va(r3t));
	/* Free region 3 table */
	page = phys_to_page(r3t);
	list_del(&page->lru);
	__free_pages(page, CRST_ALLOC_ORDER);
}

/**
 * __gmap_unshadow_r2t - remove all entries from a shadow region-2 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r2t: pointer to the start of a shadow region-2 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
				unsigned long *r2t)
{
	phys_addr_t r3t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION2_SIZE) {
		if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r3t = r2t[i] & _REGION_ENTRY_ORIGIN;
		r2t[i] = _REGION2_ENTRY_EMPTY;
		__gmap_unshadow_r3t(sg, raddr, __va(r3t));
		/* Free region 3 table */
		page = phys_to_page(r3t);
		list_del(&page->lru);
		__free_pages(page, CRST_ALLOC_ORDER);
	}
}

/**
 * gmap_unshadow_r2t - remove a shadow region-2 table from a region-1 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r1o, *r1e;
	struct page *page;
	phys_addr_t r2t;

	BUG_ON(!gmap_is_shadow(sg));
	r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */
	if (!r1e || !(*r1e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _REGION1_SIZE - 1);
	r1o = (unsigned long) (r1e - ((raddr & _REGION1_INDEX) >> _REGION1_SHIFT));
	gmap_idte_one(__pa(r1o) | _ASCE_TYPE_REGION1, raddr);
	r2t = *r1e & _REGION_ENTRY_ORIGIN;
	*r1e = _REGION1_ENTRY_EMPTY;
	__gmap_unshadow_r2t(sg, raddr, __va(r2t));
	/* Free region 2 table */
	page = phys_to_page(r2t);
	list_del(&page->lru);
	__free_pages(page, CRST_ALLOC_ORDER);
}

/**
 * __gmap_unshadow_r1t - remove all entries from a shadow region-1 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r1t: pointer to the start of a shadow region-1 table
 *
 * Called with the shadow->guest_table_lock
 */
static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
				unsigned long *r1t)
{
	unsigned long asce;
	struct page *page;
	phys_addr_t r2t;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = __pa(r1t) | _ASCE_TYPE_REGION1;
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION1_SIZE) {
		if (!(r1t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r2t = r1t[i] & _REGION_ENTRY_ORIGIN;
		__gmap_unshadow_r2t(sg, raddr, __va(r2t));
		/* Clear entry and flush translation r1t -> r2t */
		gmap_idte_one(asce, raddr);
		r1t[i] = _REGION1_ENTRY_EMPTY;
		/* Free region 2 table */
		page = phys_to_page(r2t);
		list_del(&page->lru);
		__free_pages(page, CRST_ALLOC_ORDER);
	}
}

/**
 * gmap_unshadow - remove a shadow page table completely
 * @sg: pointer to the shadow guest address space structure
 *
 * Called with sg->guest_table_lock
 */
static void gmap_unshadow(struct gmap *sg)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	if (sg->removed)
		return;
	sg->removed = 1;
	gmap_call_notifier(sg, 0, -1UL);
	gmap_flush_tlb(sg);
	table = __va(sg->asce & _ASCE_ORIGIN);
	switch (sg->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		__gmap_unshadow_r1t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION2:
		__gmap_unshadow_r2t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION3:
		__gmap_unshadow_r3t(sg, 0, table);
		break;
	case _ASCE_TYPE_SEGMENT:
		__gmap_unshadow_sgt(sg, 0, table);
		break;
	}
}

/**
 * gmap_find_shadow - find a specific asce in the list of shadow tables
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns the pointer to a gmap if a shadow table with the given asce is
 * already available, ERR_PTR(-EAGAIN) if another one is just being created,
 * otherwise NULL
 */
static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce,
				     int edat_level)
{
	struct gmap *sg;

	list_for_each_entry(sg, &parent->children, list) {
		if (sg->orig_asce != asce || sg->edat_level != edat_level ||
		    sg->removed)
			continue;
		if (!sg->initialized)
			return ERR_PTR(-EAGAIN);
		refcount_inc(&sg->ref_count);
		return sg;
	}
	return NULL;
}

/**
 * gmap_shadow_valid - check if a shadow guest address space matches the
 *                     given properties and is still valid
 * @sg: pointer to the shadow guest address space structure
 * @asce: ASCE for which the shadow table is requested
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns 1 if the gmap shadow is still valid and matches the given
 * properties, the caller can continue using it. Returns 0 otherwise, the
 * caller has to request a new shadow gmap in this case.
 *
 */
int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level)
{
	if (sg->removed)
		return 0;
	return sg->orig_asce == asce && sg->edat_level == edat_level;
}
EXPORT_SYMBOL_GPL(gmap_shadow_valid);

/**
 * gmap_shadow - create/find a shadow guest address space
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * The pages of the top level page table referred by the asce parameter
 * will be set to read-only and marked in the PGSTEs of the kvm process.
 * The shadow table will be removed automatically on any change to the
 * PTE mapping for the source table.
 *
 * Returns a guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
 * ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
 * parent gmap table could not be protected.
 */
struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
			 int edat_level)
{
	struct gmap *sg, *new;
	unsigned long limit;
	int rc;

	BUG_ON(parent->mm->context.allow_gmap_hpage_1m);
	BUG_ON(gmap_is_shadow(parent));
	spin_lock(&parent->shadow_lock);
	sg = gmap_find_shadow(parent, asce, edat_level);
	spin_unlock(&parent->shadow_lock);
	if (sg)
		return sg;
	/* Create a new shadow gmap */
	limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
	if (asce & _ASCE_REAL_SPACE)
		limit = -1UL;
	new = gmap_alloc(limit);
	if (!new)
		return ERR_PTR(-ENOMEM);
	new->mm = parent->mm;
	new->parent = gmap_get(parent);
	new->private = parent->private;
	new->orig_asce = asce;
	new->edat_level = edat_level;
	new->initialized = false;
	spin_lock(&parent->shadow_lock);
	/* Recheck if another CPU created the same shadow */
	sg = gmap_find_shadow(parent, asce, edat_level);
	if (sg) {
		spin_unlock(&parent->shadow_lock);
		gmap_free(new);
		return sg;
	}
	if (asce & _ASCE_REAL_SPACE) {
		/* only allow one real-space gmap shadow */
		list_for_each_entry(sg, &parent->children, list) {
			if (sg->orig_asce & _ASCE_REAL_SPACE) {
				spin_lock(&sg->guest_table_lock);
				gmap_unshadow(sg);
				spin_unlock(&sg->guest_table_lock);
				list_del(&sg->list);
				gmap_put(sg);
				break;
			}
		}
	}
	refcount_set(&new->ref_count, 2);
	list_add(&new->list, &parent->children);
	if (asce & _ASCE_REAL_SPACE) {
		/* nothing to protect, return right away */
		new->initialized = true;
		spin_unlock(&parent->shadow_lock);
		return new;
	}
	spin_unlock(&parent->shadow_lock);
	/* protect after insertion, so it will get properly invalidated */
	mmap_read_lock(parent->mm);
	rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
				((asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE,
				PROT_READ, GMAP_NOTIFY_SHADOW);
	mmap_read_unlock(parent->mm);
	spin_lock(&parent->shadow_lock);
	new->initialized = true;
	if (rc) {
		list_del(&new->list);
		gmap_free(new);
		new = ERR_PTR(rc);
	}
	spin_unlock(&parent->shadow_lock);
	return new;
}
EXPORT_SYMBOL_GPL(gmap_shadow);
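
/*
 * Illustrative usage sketch, not part of the original source: the VSIE
 * code requests a shadow of the guest's ASCE and keeps using it as long
 * as it stays valid; the variable names are assumed examples.
 *
 *	sg = gmap_shadow(parent_gmap, guest_asce, edat_level);
 *	if (IS_ERR(sg))
 *		return PTR_ERR(sg);
 *	...
 *	if (!gmap_shadow_valid(sg, guest_asce, edat_level))
 *		(drop the reference with gmap_put() and request a new shadow)
 */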
1815 */ 1816 int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t, 1817 int fake) 1818 { 1819 unsigned long raddr, origin, offset, len; 1820 unsigned long *table; 1821 phys_addr_t s_r2t; 1822 struct page *page; 1823 int rc; 1824 1825 BUG_ON(!gmap_is_shadow(sg)); 1826 /* Allocate a shadow region second table */ 1827 page = gmap_alloc_crst(); 1828 if (!page) 1829 return -ENOMEM; 1830 page->index = r2t & _REGION_ENTRY_ORIGIN; 1831 if (fake) 1832 page->index |= GMAP_SHADOW_FAKE_TABLE; 1833 s_r2t = page_to_phys(page); 1834 /* Install shadow region second table */ 1835 spin_lock(&sg->guest_table_lock); 1836 table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */ 1837 if (!table) { 1838 rc = -EAGAIN; /* Race with unshadow */ 1839 goto out_free; 1840 } 1841 if (!(*table & _REGION_ENTRY_INVALID)) { 1842 rc = 0; /* Already established */ 1843 goto out_free; 1844 } else if (*table & _REGION_ENTRY_ORIGIN) { 1845 rc = -EAGAIN; /* Race with shadow */ 1846 goto out_free; 1847 } 1848 crst_table_init(__va(s_r2t), _REGION2_ENTRY_EMPTY); 1849 /* mark as invalid as long as the parent table is not protected */ 1850 *table = s_r2t | _REGION_ENTRY_LENGTH | 1851 _REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID; 1852 if (sg->edat_level >= 1) 1853 *table |= (r2t & _REGION_ENTRY_PROTECT); 1854 list_add(&page->lru, &sg->crst_list); 1855 if (fake) { 1856 /* nothing to protect for fake tables */ 1857 *table &= ~_REGION_ENTRY_INVALID; 1858 spin_unlock(&sg->guest_table_lock); 1859 return 0; 1860 } 1861 spin_unlock(&sg->guest_table_lock); 1862 /* Make r2t read-only in parent gmap page table */ 1863 raddr = (saddr & _REGION1_MASK) | _SHADOW_RMAP_REGION1; 1864 origin = r2t & _REGION_ENTRY_ORIGIN; 1865 offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE; 1866 len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset; 1867 rc = gmap_protect_rmap(sg, raddr, origin + offset, len); 1868 spin_lock(&sg->guest_table_lock); 1869 if (!rc) { 1870 table = gmap_table_walk(sg, saddr, 4); 1871 if (!table || (*table & _REGION_ENTRY_ORIGIN) != s_r2t) 1872 rc = -EAGAIN; /* Race with unshadow */ 1873 else 1874 *table &= ~_REGION_ENTRY_INVALID; 1875 } else { 1876 gmap_unshadow_r2t(sg, raddr); 1877 } 1878 spin_unlock(&sg->guest_table_lock); 1879 return rc; 1880 out_free: 1881 spin_unlock(&sg->guest_table_lock); 1882 __free_pages(page, CRST_ALLOC_ORDER); 1883 return rc; 1884 } 1885 EXPORT_SYMBOL_GPL(gmap_shadow_r2t); 1886 1887 /** 1888 * gmap_shadow_r3t - create a shadow region 3 table 1889 * @sg: pointer to the shadow guest address space structure 1890 * @saddr: faulting address in the shadow gmap 1891 * @r3t: parent gmap address of the region 3 table to get shadowed 1892 * @fake: r3t references contiguous guest memory block, not a r3t 1893 * 1894 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the 1895 * shadow table structure is incomplete, -ENOMEM if out of memory and 1896 * -EFAULT if an address in the parent gmap could not be resolved. 1897 * 1898 * Called with sg->mm->mmap_lock in read. 
1899 */ 1900 int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t, 1901 int fake) 1902 { 1903 unsigned long raddr, origin, offset, len; 1904 unsigned long *table; 1905 phys_addr_t s_r3t; 1906 struct page *page; 1907 int rc; 1908 1909 BUG_ON(!gmap_is_shadow(sg)); 1910 /* Allocate a shadow region second table */ 1911 page = gmap_alloc_crst(); 1912 if (!page) 1913 return -ENOMEM; 1914 page->index = r3t & _REGION_ENTRY_ORIGIN; 1915 if (fake) 1916 page->index |= GMAP_SHADOW_FAKE_TABLE; 1917 s_r3t = page_to_phys(page); 1918 /* Install shadow region second table */ 1919 spin_lock(&sg->guest_table_lock); 1920 table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */ 1921 if (!table) { 1922 rc = -EAGAIN; /* Race with unshadow */ 1923 goto out_free; 1924 } 1925 if (!(*table & _REGION_ENTRY_INVALID)) { 1926 rc = 0; /* Already established */ 1927 goto out_free; 1928 } else if (*table & _REGION_ENTRY_ORIGIN) { 1929 rc = -EAGAIN; /* Race with shadow */ 1930 goto out_free; 1931 } 1932 crst_table_init(__va(s_r3t), _REGION3_ENTRY_EMPTY); 1933 /* mark as invalid as long as the parent table is not protected */ 1934 *table = s_r3t | _REGION_ENTRY_LENGTH | 1935 _REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID; 1936 if (sg->edat_level >= 1) 1937 *table |= (r3t & _REGION_ENTRY_PROTECT); 1938 list_add(&page->lru, &sg->crst_list); 1939 if (fake) { 1940 /* nothing to protect for fake tables */ 1941 *table &= ~_REGION_ENTRY_INVALID; 1942 spin_unlock(&sg->guest_table_lock); 1943 return 0; 1944 } 1945 spin_unlock(&sg->guest_table_lock); 1946 /* Make r3t read-only in parent gmap page table */ 1947 raddr = (saddr & _REGION2_MASK) | _SHADOW_RMAP_REGION2; 1948 origin = r3t & _REGION_ENTRY_ORIGIN; 1949 offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE; 1950 len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset; 1951 rc = gmap_protect_rmap(sg, raddr, origin + offset, len); 1952 spin_lock(&sg->guest_table_lock); 1953 if (!rc) { 1954 table = gmap_table_walk(sg, saddr, 3); 1955 if (!table || (*table & _REGION_ENTRY_ORIGIN) != s_r3t) 1956 rc = -EAGAIN; /* Race with unshadow */ 1957 else 1958 *table &= ~_REGION_ENTRY_INVALID; 1959 } else { 1960 gmap_unshadow_r3t(sg, raddr); 1961 } 1962 spin_unlock(&sg->guest_table_lock); 1963 return rc; 1964 out_free: 1965 spin_unlock(&sg->guest_table_lock); 1966 __free_pages(page, CRST_ALLOC_ORDER); 1967 return rc; 1968 } 1969 EXPORT_SYMBOL_GPL(gmap_shadow_r3t); 1970 1971 /** 1972 * gmap_shadow_sgt - create a shadow segment table 1973 * @sg: pointer to the shadow guest address space structure 1974 * @saddr: faulting address in the shadow gmap 1975 * @sgt: parent gmap address of the segment table to get shadowed 1976 * @fake: sgt references contiguous guest memory block, not a sgt 1977 * 1978 * Returns: 0 if successfully shadowed or already shadowed, -EAGAIN if the 1979 * shadow table structure is incomplete, -ENOMEM if out of memory and 1980 * -EFAULT if an address in the parent gmap could not be resolved. 1981 * 1982 * Called with sg->mm->mmap_lock in read. 
1983 */ 1984 int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt, 1985 int fake) 1986 { 1987 unsigned long raddr, origin, offset, len; 1988 unsigned long *table; 1989 phys_addr_t s_sgt; 1990 struct page *page; 1991 int rc; 1992 1993 BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE)); 1994 /* Allocate a shadow segment table */ 1995 page = gmap_alloc_crst(); 1996 if (!page) 1997 return -ENOMEM; 1998 page->index = sgt & _REGION_ENTRY_ORIGIN; 1999 if (fake) 2000 page->index |= GMAP_SHADOW_FAKE_TABLE; 2001 s_sgt = page_to_phys(page); 2002 /* Install shadow region second table */ 2003 spin_lock(&sg->guest_table_lock); 2004 table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */ 2005 if (!table) { 2006 rc = -EAGAIN; /* Race with unshadow */ 2007 goto out_free; 2008 } 2009 if (!(*table & _REGION_ENTRY_INVALID)) { 2010 rc = 0; /* Already established */ 2011 goto out_free; 2012 } else if (*table & _REGION_ENTRY_ORIGIN) { 2013 rc = -EAGAIN; /* Race with shadow */ 2014 goto out_free; 2015 } 2016 crst_table_init(__va(s_sgt), _SEGMENT_ENTRY_EMPTY); 2017 /* mark as invalid as long as the parent table is not protected */ 2018 *table = s_sgt | _REGION_ENTRY_LENGTH | 2019 _REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID; 2020 if (sg->edat_level >= 1) 2021 *table |= sgt & _REGION_ENTRY_PROTECT; 2022 list_add(&page->lru, &sg->crst_list); 2023 if (fake) { 2024 /* nothing to protect for fake tables */ 2025 *table &= ~_REGION_ENTRY_INVALID; 2026 spin_unlock(&sg->guest_table_lock); 2027 return 0; 2028 } 2029 spin_unlock(&sg->guest_table_lock); 2030 /* Make sgt read-only in parent gmap page table */ 2031 raddr = (saddr & _REGION3_MASK) | _SHADOW_RMAP_REGION3; 2032 origin = sgt & _REGION_ENTRY_ORIGIN; 2033 offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE; 2034 len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset; 2035 rc = gmap_protect_rmap(sg, raddr, origin + offset, len); 2036 spin_lock(&sg->guest_table_lock); 2037 if (!rc) { 2038 table = gmap_table_walk(sg, saddr, 2); 2039 if (!table || (*table & _REGION_ENTRY_ORIGIN) != s_sgt) 2040 rc = -EAGAIN; /* Race with unshadow */ 2041 else 2042 *table &= ~_REGION_ENTRY_INVALID; 2043 } else { 2044 gmap_unshadow_sgt(sg, raddr); 2045 } 2046 spin_unlock(&sg->guest_table_lock); 2047 return rc; 2048 out_free: 2049 spin_unlock(&sg->guest_table_lock); 2050 __free_pages(page, CRST_ALLOC_ORDER); 2051 return rc; 2052 } 2053 EXPORT_SYMBOL_GPL(gmap_shadow_sgt); 2054 2055 /** 2056 * gmap_shadow_pgt_lookup - find a shadow page table 2057 * @sg: pointer to the shadow guest address space structure 2058 * @saddr: the address in the shadow aguest address space 2059 * @pgt: parent gmap address of the page table to get shadowed 2060 * @dat_protection: if the pgtable is marked as protected by dat 2061 * @fake: pgt references contiguous guest memory block, not a pgtable 2062 * 2063 * Returns 0 if the shadow page table was found and -EAGAIN if the page 2064 * table was not found. 2065 * 2066 * Called with sg->mm->mmap_lock in read. 
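 *
 * Hypothetical sketch of a fault-resolution sequence (the real caller is
 * KVM code outside of this file; names are placeholders):
 *
 *	rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
 *	if (rc == -EAGAIN)
 *		... shadow the missing tables, ending with gmap_shadow_pgt() ...
 *	... then map the page itself with gmap_shadow_page(sg, saddr, pte) ...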
2067 */ 2068 int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr, 2069 unsigned long *pgt, int *dat_protection, 2070 int *fake) 2071 { 2072 unsigned long *table; 2073 struct page *page; 2074 int rc; 2075 2076 BUG_ON(!gmap_is_shadow(sg)); 2077 spin_lock(&sg->guest_table_lock); 2078 table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */ 2079 if (table && !(*table & _SEGMENT_ENTRY_INVALID)) { 2080 /* Shadow page tables are full pages (pte+pgste) */ 2081 page = pfn_to_page(*table >> PAGE_SHIFT); 2082 *pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE; 2083 *dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT); 2084 *fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE); 2085 rc = 0; 2086 } else { 2087 rc = -EAGAIN; 2088 } 2089 spin_unlock(&sg->guest_table_lock); 2090 return rc; 2091 2092 } 2093 EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup); 2094 2095 /** 2096 * gmap_shadow_pgt - instantiate a shadow page table 2097 * @sg: pointer to the shadow guest address space structure 2098 * @saddr: faulting address in the shadow gmap 2099 * @pgt: parent gmap address of the page table to get shadowed 2100 * @fake: pgt references contiguous guest memory block, not a pgtable 2101 * 2102 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the 2103 * shadow table structure is incomplete, -ENOMEM if out of memory, 2104 * -EFAULT if an address in the parent gmap could not be resolved and 2105 * 2106 * Called with gmap->mm->mmap_lock in read 2107 */ 2108 int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt, 2109 int fake) 2110 { 2111 unsigned long raddr, origin; 2112 unsigned long *table; 2113 struct ptdesc *ptdesc; 2114 phys_addr_t s_pgt; 2115 int rc; 2116 2117 BUG_ON(!gmap_is_shadow(sg) || (pgt & _SEGMENT_ENTRY_LARGE)); 2118 /* Allocate a shadow page table */ 2119 ptdesc = page_table_alloc_pgste(sg->mm); 2120 if (!ptdesc) 2121 return -ENOMEM; 2122 ptdesc->pt_index = pgt & _SEGMENT_ENTRY_ORIGIN; 2123 if (fake) 2124 ptdesc->pt_index |= GMAP_SHADOW_FAKE_TABLE; 2125 s_pgt = page_to_phys(ptdesc_page(ptdesc)); 2126 /* Install shadow page table */ 2127 spin_lock(&sg->guest_table_lock); 2128 table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */ 2129 if (!table) { 2130 rc = -EAGAIN; /* Race with unshadow */ 2131 goto out_free; 2132 } 2133 if (!(*table & _SEGMENT_ENTRY_INVALID)) { 2134 rc = 0; /* Already established */ 2135 goto out_free; 2136 } else if (*table & _SEGMENT_ENTRY_ORIGIN) { 2137 rc = -EAGAIN; /* Race with shadow */ 2138 goto out_free; 2139 } 2140 /* mark as invalid as long as the parent table is not protected */ 2141 *table = (unsigned long) s_pgt | _SEGMENT_ENTRY | 2142 (pgt & _SEGMENT_ENTRY_PROTECT) | _SEGMENT_ENTRY_INVALID; 2143 list_add(&ptdesc->pt_list, &sg->pt_list); 2144 if (fake) { 2145 /* nothing to protect for fake tables */ 2146 *table &= ~_SEGMENT_ENTRY_INVALID; 2147 spin_unlock(&sg->guest_table_lock); 2148 return 0; 2149 } 2150 spin_unlock(&sg->guest_table_lock); 2151 /* Make pgt read-only in parent gmap page table (not the pgste) */ 2152 raddr = (saddr & _SEGMENT_MASK) | _SHADOW_RMAP_SEGMENT; 2153 origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK; 2154 rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE); 2155 spin_lock(&sg->guest_table_lock); 2156 if (!rc) { 2157 table = gmap_table_walk(sg, saddr, 1); 2158 if (!table || (*table & _SEGMENT_ENTRY_ORIGIN) != s_pgt) 2159 rc = -EAGAIN; /* Race with unshadow */ 2160 else 2161 *table &= ~_SEGMENT_ENTRY_INVALID; 2162 } else { 2163 gmap_unshadow_pgt(sg, raddr); 2164 } 2165 
spin_unlock(&sg->guest_table_lock); 2166 return rc; 2167 out_free: 2168 spin_unlock(&sg->guest_table_lock); 2169 page_table_free_pgste(ptdesc); 2170 return rc; 2171 2172 } 2173 EXPORT_SYMBOL_GPL(gmap_shadow_pgt); 2174 2175 /** 2176 * gmap_shadow_page - create a shadow page mapping 2177 * @sg: pointer to the shadow guest address space structure 2178 * @saddr: faulting address in the shadow gmap 2179 * @pte: pte in parent gmap address space to get shadowed 2180 * 2181 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the 2182 * shadow table structure is incomplete, -ENOMEM if out of memory and 2183 * -EFAULT if an address in the parent gmap could not be resolved. 2184 * 2185 * Called with sg->mm->mmap_lock in read. 2186 */ 2187 int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte) 2188 { 2189 struct gmap *parent; 2190 struct gmap_rmap *rmap; 2191 unsigned long vmaddr, paddr; 2192 spinlock_t *ptl; 2193 pte_t *sptep, *tptep; 2194 int prot; 2195 int rc; 2196 2197 BUG_ON(!gmap_is_shadow(sg)); 2198 parent = sg->parent; 2199 prot = (pte_val(pte) & _PAGE_PROTECT) ? PROT_READ : PROT_WRITE; 2200 2201 rmap = kzalloc(sizeof(*rmap), GFP_KERNEL_ACCOUNT); 2202 if (!rmap) 2203 return -ENOMEM; 2204 rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE; 2205 2206 while (1) { 2207 paddr = pte_val(pte) & PAGE_MASK; 2208 vmaddr = __gmap_translate(parent, paddr); 2209 if (IS_ERR_VALUE(vmaddr)) { 2210 rc = vmaddr; 2211 break; 2212 } 2213 rc = radix_tree_preload(GFP_KERNEL_ACCOUNT); 2214 if (rc) 2215 break; 2216 rc = -EAGAIN; 2217 sptep = gmap_pte_op_walk(parent, paddr, &ptl); 2218 if (sptep) { 2219 spin_lock(&sg->guest_table_lock); 2220 /* Get page table pointer */ 2221 tptep = (pte_t *) gmap_table_walk(sg, saddr, 0); 2222 if (!tptep) { 2223 spin_unlock(&sg->guest_table_lock); 2224 gmap_pte_op_end(sptep, ptl); 2225 radix_tree_preload_end(); 2226 break; 2227 } 2228 rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte); 2229 if (rc > 0) { 2230 /* Success and a new mapping */ 2231 gmap_insert_rmap(sg, vmaddr, rmap); 2232 rmap = NULL; 2233 rc = 0; 2234 } 2235 gmap_pte_op_end(sptep, ptl); 2236 spin_unlock(&sg->guest_table_lock); 2237 } 2238 radix_tree_preload_end(); 2239 if (!rc) 2240 break; 2241 rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot); 2242 if (rc) 2243 break; 2244 } 2245 kfree(rmap); 2246 return rc; 2247 } 2248 EXPORT_SYMBOL_GPL(gmap_shadow_page); 2249 2250 /* 2251 * gmap_shadow_notify - handle notifications for shadow gmap 2252 * 2253 * Called with sg->parent->shadow_lock. 
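 *
 * Invoked from ptep_notify() below for every child of the parent gmap
 * whenever a pte carrying the PGSTE_VSIE_BIT notifier bit is invalidated,
 * i.e. whenever a parent pte that shadow tables depend on changes.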
 */
static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
			       unsigned long gaddr)
{
	struct gmap_rmap *rmap, *rnext, *head;
	unsigned long start, end, bits, raddr;

	BUG_ON(!gmap_is_shadow(sg));

	spin_lock(&sg->guest_table_lock);
	if (sg->removed) {
		spin_unlock(&sg->guest_table_lock);
		return;
	}
	/* Check for top level table */
	start = sg->orig_asce & _ASCE_ORIGIN;
	end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE;
	if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start &&
	    gaddr < end) {
		/* The complete shadow table has to go */
		gmap_unshadow(sg);
		spin_unlock(&sg->guest_table_lock);
		list_del(&sg->list);
		gmap_put(sg);
		return;
	}
	/* Remove the page table tree for one specific entry */
	head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
	gmap_for_each_rmap_safe(rmap, rnext, head) {
		bits = rmap->raddr & _SHADOW_RMAP_MASK;
		raddr = rmap->raddr ^ bits;
		switch (bits) {
		case _SHADOW_RMAP_REGION1:
			gmap_unshadow_r2t(sg, raddr);
			break;
		case _SHADOW_RMAP_REGION2:
			gmap_unshadow_r3t(sg, raddr);
			break;
		case _SHADOW_RMAP_REGION3:
			gmap_unshadow_sgt(sg, raddr);
			break;
		case _SHADOW_RMAP_SEGMENT:
			gmap_unshadow_pgt(sg, raddr);
			break;
		case _SHADOW_RMAP_PGTABLE:
			gmap_unshadow_page(sg, raddr);
			break;
		}
		kfree(rmap);
	}
	spin_unlock(&sg->guest_table_lock);
}

/**
 * ptep_notify - call all invalidation callbacks for a specific pte.
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 * @pte: pointer to the page table entry
 * @bits: bits from the pgste that caused the notify call
 *
 * This function is assumed to be called with the page table lock held
 * for the pte to notify.
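 *
 * The affected guest address is reconstructed from two pieces: the segment
 * address found in the host_to_guest radix tree for vmaddr, plus the page
 * offset within that segment. The offset is derived from the position of
 * @pte inside its 256-entry page table: the byte offset of the pte, scaled
 * by PAGE_SIZE / sizeof(pte_t), equals the page index times PAGE_SIZE.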
2316 */ 2317 void ptep_notify(struct mm_struct *mm, unsigned long vmaddr, 2318 pte_t *pte, unsigned long bits) 2319 { 2320 unsigned long offset, gaddr = 0; 2321 unsigned long *table; 2322 struct gmap *gmap, *sg, *next; 2323 2324 offset = ((unsigned long) pte) & (255 * sizeof(pte_t)); 2325 offset = offset * (PAGE_SIZE / sizeof(pte_t)); 2326 rcu_read_lock(); 2327 list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) { 2328 spin_lock(&gmap->guest_table_lock); 2329 table = radix_tree_lookup(&gmap->host_to_guest, 2330 vmaddr >> PMD_SHIFT); 2331 if (table) 2332 gaddr = __gmap_segment_gaddr(table) + offset; 2333 spin_unlock(&gmap->guest_table_lock); 2334 if (!table) 2335 continue; 2336 2337 if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) { 2338 spin_lock(&gmap->shadow_lock); 2339 list_for_each_entry_safe(sg, next, 2340 &gmap->children, list) 2341 gmap_shadow_notify(sg, vmaddr, gaddr); 2342 spin_unlock(&gmap->shadow_lock); 2343 } 2344 if (bits & PGSTE_IN_BIT) 2345 gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1); 2346 } 2347 rcu_read_unlock(); 2348 } 2349 EXPORT_SYMBOL_GPL(ptep_notify); 2350 2351 static void pmdp_notify_gmap(struct gmap *gmap, pmd_t *pmdp, 2352 unsigned long gaddr) 2353 { 2354 set_pmd(pmdp, clear_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_GMAP_IN))); 2355 gmap_call_notifier(gmap, gaddr, gaddr + HPAGE_SIZE - 1); 2356 } 2357 2358 /** 2359 * gmap_pmdp_xchg - exchange a gmap pmd with another 2360 * @gmap: pointer to the guest address space structure 2361 * @pmdp: pointer to the pmd entry 2362 * @new: replacement entry 2363 * @gaddr: the affected guest address 2364 * 2365 * This function is assumed to be called with the guest_table_lock 2366 * held. 2367 */ 2368 static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *pmdp, pmd_t new, 2369 unsigned long gaddr) 2370 { 2371 gaddr &= HPAGE_MASK; 2372 pmdp_notify_gmap(gmap, pmdp, gaddr); 2373 new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_GMAP_IN)); 2374 if (MACHINE_HAS_TLB_GUEST) 2375 __pmdp_idte(gaddr, (pmd_t *)pmdp, IDTE_GUEST_ASCE, gmap->asce, 2376 IDTE_GLOBAL); 2377 else if (MACHINE_HAS_IDTE) 2378 __pmdp_idte(gaddr, (pmd_t *)pmdp, 0, 0, IDTE_GLOBAL); 2379 else 2380 __pmdp_csp(pmdp); 2381 set_pmd(pmdp, new); 2382 } 2383 2384 static void gmap_pmdp_clear(struct mm_struct *mm, unsigned long vmaddr, 2385 int purge) 2386 { 2387 pmd_t *pmdp; 2388 struct gmap *gmap; 2389 unsigned long gaddr; 2390 2391 rcu_read_lock(); 2392 list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) { 2393 spin_lock(&gmap->guest_table_lock); 2394 pmdp = (pmd_t *)radix_tree_delete(&gmap->host_to_guest, 2395 vmaddr >> PMD_SHIFT); 2396 if (pmdp) { 2397 gaddr = __gmap_segment_gaddr((unsigned long *)pmdp); 2398 pmdp_notify_gmap(gmap, pmdp, gaddr); 2399 WARN_ON(pmd_val(*pmdp) & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE | 2400 _SEGMENT_ENTRY_GMAP_UC | 2401 _SEGMENT_ENTRY)); 2402 if (purge) 2403 __pmdp_csp(pmdp); 2404 set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY)); 2405 } 2406 spin_unlock(&gmap->guest_table_lock); 2407 } 2408 rcu_read_unlock(); 2409 } 2410 2411 /** 2412 * gmap_pmdp_invalidate - invalidate all affected guest pmd entries without 2413 * flushing 2414 * @mm: pointer to the process mm_struct 2415 * @vmaddr: virtual address in the process address space 2416 */ 2417 void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr) 2418 { 2419 gmap_pmdp_clear(mm, vmaddr, 0); 2420 } 2421 EXPORT_SYMBOL_GPL(gmap_pmdp_invalidate); 2422 2423 /** 2424 * gmap_pmdp_csp - csp all affected guest pmd entries 2425 * @mm: pointer to the process mm_struct 
2426 * @vmaddr: virtual address in the process address space 2427 */ 2428 void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr) 2429 { 2430 gmap_pmdp_clear(mm, vmaddr, 1); 2431 } 2432 EXPORT_SYMBOL_GPL(gmap_pmdp_csp); 2433 2434 /** 2435 * gmap_pmdp_idte_local - invalidate and clear a guest pmd entry 2436 * @mm: pointer to the process mm_struct 2437 * @vmaddr: virtual address in the process address space 2438 */ 2439 void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr) 2440 { 2441 unsigned long *entry, gaddr; 2442 struct gmap *gmap; 2443 pmd_t *pmdp; 2444 2445 rcu_read_lock(); 2446 list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) { 2447 spin_lock(&gmap->guest_table_lock); 2448 entry = radix_tree_delete(&gmap->host_to_guest, 2449 vmaddr >> PMD_SHIFT); 2450 if (entry) { 2451 pmdp = (pmd_t *)entry; 2452 gaddr = __gmap_segment_gaddr(entry); 2453 pmdp_notify_gmap(gmap, pmdp, gaddr); 2454 WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE | 2455 _SEGMENT_ENTRY_GMAP_UC | 2456 _SEGMENT_ENTRY)); 2457 if (MACHINE_HAS_TLB_GUEST) 2458 __pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE, 2459 gmap->asce, IDTE_LOCAL); 2460 else if (MACHINE_HAS_IDTE) 2461 __pmdp_idte(gaddr, pmdp, 0, 0, IDTE_LOCAL); 2462 *entry = _SEGMENT_ENTRY_EMPTY; 2463 } 2464 spin_unlock(&gmap->guest_table_lock); 2465 } 2466 rcu_read_unlock(); 2467 } 2468 EXPORT_SYMBOL_GPL(gmap_pmdp_idte_local); 2469 2470 /** 2471 * gmap_pmdp_idte_global - invalidate and clear a guest pmd entry 2472 * @mm: pointer to the process mm_struct 2473 * @vmaddr: virtual address in the process address space 2474 */ 2475 void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr) 2476 { 2477 unsigned long *entry, gaddr; 2478 struct gmap *gmap; 2479 pmd_t *pmdp; 2480 2481 rcu_read_lock(); 2482 list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) { 2483 spin_lock(&gmap->guest_table_lock); 2484 entry = radix_tree_delete(&gmap->host_to_guest, 2485 vmaddr >> PMD_SHIFT); 2486 if (entry) { 2487 pmdp = (pmd_t *)entry; 2488 gaddr = __gmap_segment_gaddr(entry); 2489 pmdp_notify_gmap(gmap, pmdp, gaddr); 2490 WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE | 2491 _SEGMENT_ENTRY_GMAP_UC | 2492 _SEGMENT_ENTRY)); 2493 if (MACHINE_HAS_TLB_GUEST) 2494 __pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE, 2495 gmap->asce, IDTE_GLOBAL); 2496 else if (MACHINE_HAS_IDTE) 2497 __pmdp_idte(gaddr, pmdp, 0, 0, IDTE_GLOBAL); 2498 else 2499 __pmdp_csp(pmdp); 2500 *entry = _SEGMENT_ENTRY_EMPTY; 2501 } 2502 spin_unlock(&gmap->guest_table_lock); 2503 } 2504 rcu_read_unlock(); 2505 } 2506 EXPORT_SYMBOL_GPL(gmap_pmdp_idte_global); 2507 2508 /** 2509 * gmap_test_and_clear_dirty_pmd - test and reset segment dirty status 2510 * @gmap: pointer to guest address space 2511 * @pmdp: pointer to the pmd to be tested 2512 * @gaddr: virtual address in the guest address space 2513 * 2514 * This function is assumed to be called with the guest_table_lock 2515 * held. 
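 *
 * A large segment counts as dirty if it is not write-protected, or if
 * _SEGMENT_ENTRY_GMAP_UC was set on it since the last call. In the dirty
 * case the UC mark is cleared and the segment is re-protected (PROT_READ),
 * so the next guest write will be noticed again.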
2516 */ 2517 static bool gmap_test_and_clear_dirty_pmd(struct gmap *gmap, pmd_t *pmdp, 2518 unsigned long gaddr) 2519 { 2520 if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID) 2521 return false; 2522 2523 /* Already protected memory, which did not change is clean */ 2524 if (pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT && 2525 !(pmd_val(*pmdp) & _SEGMENT_ENTRY_GMAP_UC)) 2526 return false; 2527 2528 /* Clear UC indication and reset protection */ 2529 set_pmd(pmdp, clear_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_GMAP_UC))); 2530 gmap_protect_pmd(gmap, gaddr, pmdp, PROT_READ, 0); 2531 return true; 2532 } 2533 2534 /** 2535 * gmap_sync_dirty_log_pmd - set bitmap based on dirty status of segment 2536 * @gmap: pointer to guest address space 2537 * @bitmap: dirty bitmap for this pmd 2538 * @gaddr: virtual address in the guest address space 2539 * @vmaddr: virtual address in the host address space 2540 * 2541 * This function is assumed to be called with the guest_table_lock 2542 * held. 2543 */ 2544 void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4], 2545 unsigned long gaddr, unsigned long vmaddr) 2546 { 2547 int i; 2548 pmd_t *pmdp; 2549 pte_t *ptep; 2550 spinlock_t *ptl; 2551 2552 pmdp = gmap_pmd_op_walk(gmap, gaddr); 2553 if (!pmdp) 2554 return; 2555 2556 if (pmd_leaf(*pmdp)) { 2557 if (gmap_test_and_clear_dirty_pmd(gmap, pmdp, gaddr)) 2558 bitmap_fill(bitmap, _PAGE_ENTRIES); 2559 } else { 2560 for (i = 0; i < _PAGE_ENTRIES; i++, vmaddr += PAGE_SIZE) { 2561 ptep = pte_alloc_map_lock(gmap->mm, pmdp, vmaddr, &ptl); 2562 if (!ptep) 2563 continue; 2564 if (ptep_test_and_clear_uc(gmap->mm, vmaddr, ptep)) 2565 set_bit(i, bitmap); 2566 pte_unmap_unlock(ptep, ptl); 2567 } 2568 } 2569 gmap_pmd_op_end(gmap, pmdp); 2570 } 2571 EXPORT_SYMBOL_GPL(gmap_sync_dirty_log_pmd); 2572 2573 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 2574 static int thp_split_walk_pmd_entry(pmd_t *pmd, unsigned long addr, 2575 unsigned long end, struct mm_walk *walk) 2576 { 2577 struct vm_area_struct *vma = walk->vma; 2578 2579 split_huge_pmd(vma, pmd, addr); 2580 return 0; 2581 } 2582 2583 static const struct mm_walk_ops thp_split_walk_ops = { 2584 .pmd_entry = thp_split_walk_pmd_entry, 2585 .walk_lock = PGWALK_WRLOCK_VERIFY, 2586 }; 2587 2588 static inline void thp_split_mm(struct mm_struct *mm) 2589 { 2590 struct vm_area_struct *vma; 2591 VMA_ITERATOR(vmi, mm, 0); 2592 2593 for_each_vma(vmi, vma) { 2594 vm_flags_mod(vma, VM_NOHUGEPAGE, VM_HUGEPAGE); 2595 walk_page_vma(vma, &thp_split_walk_ops, NULL); 2596 } 2597 mm->def_flags |= VM_NOHUGEPAGE; 2598 } 2599 #else 2600 static inline void thp_split_mm(struct mm_struct *mm) 2601 { 2602 } 2603 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 2604 2605 /* 2606 * switch on pgstes for its userspace process (for kvm) 2607 */ 2608 int s390_enable_sie(void) 2609 { 2610 struct mm_struct *mm = current->mm; 2611 2612 /* Do we have pgstes? if yes, we are done */ 2613 if (mm_has_pgste(mm)) 2614 return 0; 2615 /* Fail if the page tables are 2K */ 2616 if (!mm_alloc_pgste(mm)) 2617 return -EINVAL; 2618 mmap_write_lock(mm); 2619 mm->context.has_pgste = 1; 2620 /* split thp mappings and disable thp for future mappings */ 2621 thp_split_mm(mm); 2622 mmap_write_unlock(mm); 2623 return 0; 2624 } 2625 EXPORT_SYMBOL_GPL(s390_enable_sie); 2626 2627 static int find_zeropage_pte_entry(pte_t *pte, unsigned long addr, 2628 unsigned long end, struct mm_walk *walk) 2629 { 2630 unsigned long *found_addr = walk->private; 2631 2632 /* Return 1 of the page is a zeropage. 
*/ 2633 if (is_zero_pfn(pte_pfn(*pte))) { 2634 /* 2635 * Shared zeropage in e.g., a FS DAX mapping? We cannot do the 2636 * right thing and likely don't care: FAULT_FLAG_UNSHARE 2637 * currently only works in COW mappings, which is also where 2638 * mm_forbids_zeropage() is checked. 2639 */ 2640 if (!is_cow_mapping(walk->vma->vm_flags)) 2641 return -EFAULT; 2642 2643 *found_addr = addr; 2644 return 1; 2645 } 2646 return 0; 2647 } 2648 2649 static const struct mm_walk_ops find_zeropage_ops = { 2650 .pte_entry = find_zeropage_pte_entry, 2651 .walk_lock = PGWALK_WRLOCK, 2652 }; 2653 2654 /* 2655 * Unshare all shared zeropages, replacing them by anonymous pages. Note that 2656 * we cannot simply zap all shared zeropages, because this could later 2657 * trigger unexpected userfaultfd missing events. 2658 * 2659 * This must be called after mm->context.allow_cow_sharing was 2660 * set to 0, to avoid future mappings of shared zeropages. 2661 * 2662 * mm contracts with s390, that even if mm were to remove a page table, 2663 * and racing with walk_page_range_vma() calling pte_offset_map_lock() 2664 * would fail, it will never insert a page table containing empty zero 2665 * pages once mm_forbids_zeropage(mm) i.e. 2666 * mm->context.allow_cow_sharing is set to 0. 2667 */ 2668 static int __s390_unshare_zeropages(struct mm_struct *mm) 2669 { 2670 struct vm_area_struct *vma; 2671 VMA_ITERATOR(vmi, mm, 0); 2672 unsigned long addr; 2673 vm_fault_t fault; 2674 int rc; 2675 2676 for_each_vma(vmi, vma) { 2677 /* 2678 * We could only look at COW mappings, but it's more future 2679 * proof to catch unexpected zeropages in other mappings and 2680 * fail. 2681 */ 2682 if ((vma->vm_flags & VM_PFNMAP) || is_vm_hugetlb_page(vma)) 2683 continue; 2684 addr = vma->vm_start; 2685 2686 retry: 2687 rc = walk_page_range_vma(vma, addr, vma->vm_end, 2688 &find_zeropage_ops, &addr); 2689 if (rc < 0) 2690 return rc; 2691 else if (!rc) 2692 continue; 2693 2694 /* addr was updated by find_zeropage_pte_entry() */ 2695 fault = handle_mm_fault(vma, addr, 2696 FAULT_FLAG_UNSHARE | FAULT_FLAG_REMOTE, 2697 NULL); 2698 if (fault & VM_FAULT_OOM) 2699 return -ENOMEM; 2700 /* 2701 * See break_ksm(): even after handle_mm_fault() returned 0, we 2702 * must start the lookup from the current address, because 2703 * handle_mm_fault() may back out if there's any difficulty. 2704 * 2705 * VM_FAULT_SIGBUS and VM_FAULT_SIGSEGV are unexpected but 2706 * maybe they could trigger in the future on concurrent 2707 * truncation. In that case, the shared zeropage would be gone 2708 * and we can simply retry and make progress. 2709 */ 2710 cond_resched(); 2711 goto retry; 2712 } 2713 2714 return 0; 2715 } 2716 2717 static int __s390_disable_cow_sharing(struct mm_struct *mm) 2718 { 2719 int rc; 2720 2721 if (!mm->context.allow_cow_sharing) 2722 return 0; 2723 2724 mm->context.allow_cow_sharing = 0; 2725 2726 /* Replace all shared zeropages by anonymous pages. */ 2727 rc = __s390_unshare_zeropages(mm); 2728 /* 2729 * Make sure to disable KSM (if enabled for the whole process or 2730 * individual VMAs). Note that nothing currently hinders user space 2731 * from re-enabling it. 2732 */ 2733 if (!rc) 2734 rc = ksm_disable(mm); 2735 if (rc) 2736 mm->context.allow_cow_sharing = 1; 2737 return rc; 2738 } 2739 2740 /* 2741 * Disable most COW-sharing of memory pages for the whole process: 2742 * (1) Disable KSM and unmerge/unshare any KSM pages. 2743 * (2) Disallow shared zeropages and unshare any zerpages that are mapped. 
2744 * 2745 * Not that we currently don't bother with COW-shared pages that are shared 2746 * with parent/child processes due to fork(). 2747 */ 2748 int s390_disable_cow_sharing(void) 2749 { 2750 int rc; 2751 2752 mmap_write_lock(current->mm); 2753 rc = __s390_disable_cow_sharing(current->mm); 2754 mmap_write_unlock(current->mm); 2755 return rc; 2756 } 2757 EXPORT_SYMBOL_GPL(s390_disable_cow_sharing); 2758 2759 /* 2760 * Enable storage key handling from now on and initialize the storage 2761 * keys with the default key. 2762 */ 2763 static int __s390_enable_skey_pte(pte_t *pte, unsigned long addr, 2764 unsigned long next, struct mm_walk *walk) 2765 { 2766 /* Clear storage key */ 2767 ptep_zap_key(walk->mm, addr, pte); 2768 return 0; 2769 } 2770 2771 /* 2772 * Give a chance to schedule after setting a key to 256 pages. 2773 * We only hold the mm lock, which is a rwsem and the kvm srcu. 2774 * Both can sleep. 2775 */ 2776 static int __s390_enable_skey_pmd(pmd_t *pmd, unsigned long addr, 2777 unsigned long next, struct mm_walk *walk) 2778 { 2779 cond_resched(); 2780 return 0; 2781 } 2782 2783 static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr, 2784 unsigned long hmask, unsigned long next, 2785 struct mm_walk *walk) 2786 { 2787 pmd_t *pmd = (pmd_t *)pte; 2788 unsigned long start, end; 2789 struct folio *folio = page_folio(pmd_page(*pmd)); 2790 2791 /* 2792 * The write check makes sure we do not set a key on shared 2793 * memory. This is needed as the walker does not differentiate 2794 * between actual guest memory and the process executable or 2795 * shared libraries. 2796 */ 2797 if (pmd_val(*pmd) & _SEGMENT_ENTRY_INVALID || 2798 !(pmd_val(*pmd) & _SEGMENT_ENTRY_WRITE)) 2799 return 0; 2800 2801 start = pmd_val(*pmd) & HPAGE_MASK; 2802 end = start + HPAGE_SIZE; 2803 __storage_key_init_range(start, end); 2804 set_bit(PG_arch_1, &folio->flags); 2805 cond_resched(); 2806 return 0; 2807 } 2808 2809 static const struct mm_walk_ops enable_skey_walk_ops = { 2810 .hugetlb_entry = __s390_enable_skey_hugetlb, 2811 .pte_entry = __s390_enable_skey_pte, 2812 .pmd_entry = __s390_enable_skey_pmd, 2813 .walk_lock = PGWALK_WRLOCK, 2814 }; 2815 2816 int s390_enable_skey(void) 2817 { 2818 struct mm_struct *mm = current->mm; 2819 int rc = 0; 2820 2821 mmap_write_lock(mm); 2822 if (mm_uses_skeys(mm)) 2823 goto out_up; 2824 2825 mm->context.uses_skeys = 1; 2826 rc = __s390_disable_cow_sharing(mm); 2827 if (rc) { 2828 mm->context.uses_skeys = 0; 2829 goto out_up; 2830 } 2831 walk_page_range(mm, 0, TASK_SIZE, &enable_skey_walk_ops, NULL); 2832 2833 out_up: 2834 mmap_write_unlock(mm); 2835 return rc; 2836 } 2837 EXPORT_SYMBOL_GPL(s390_enable_skey); 2838 2839 /* 2840 * Reset CMMA state, make all pages stable again. 
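 * CMMA is the Collaborative Memory Management Assist; the per-page usage
 * state it tracks lives in the PGSTEs. The walk below calls
 * ptep_zap_unused() with the reset flag for every pte so that this state
 * is cleared and all pages are treated as stable again.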
2841 */ 2842 static int __s390_reset_cmma(pte_t *pte, unsigned long addr, 2843 unsigned long next, struct mm_walk *walk) 2844 { 2845 ptep_zap_unused(walk->mm, addr, pte, 1); 2846 return 0; 2847 } 2848 2849 static const struct mm_walk_ops reset_cmma_walk_ops = { 2850 .pte_entry = __s390_reset_cmma, 2851 .walk_lock = PGWALK_WRLOCK, 2852 }; 2853 2854 void s390_reset_cmma(struct mm_struct *mm) 2855 { 2856 mmap_write_lock(mm); 2857 walk_page_range(mm, 0, TASK_SIZE, &reset_cmma_walk_ops, NULL); 2858 mmap_write_unlock(mm); 2859 } 2860 EXPORT_SYMBOL_GPL(s390_reset_cmma); 2861 2862 #define GATHER_GET_PAGES 32 2863 2864 struct reset_walk_state { 2865 unsigned long next; 2866 unsigned long count; 2867 unsigned long pfns[GATHER_GET_PAGES]; 2868 }; 2869 2870 static int s390_gather_pages(pte_t *ptep, unsigned long addr, 2871 unsigned long next, struct mm_walk *walk) 2872 { 2873 struct reset_walk_state *p = walk->private; 2874 pte_t pte = READ_ONCE(*ptep); 2875 2876 if (pte_present(pte)) { 2877 /* we have a reference from the mapping, take an extra one */ 2878 get_page(phys_to_page(pte_val(pte))); 2879 p->pfns[p->count] = phys_to_pfn(pte_val(pte)); 2880 p->next = next; 2881 p->count++; 2882 } 2883 return p->count >= GATHER_GET_PAGES; 2884 } 2885 2886 static const struct mm_walk_ops gather_pages_ops = { 2887 .pte_entry = s390_gather_pages, 2888 .walk_lock = PGWALK_RDLOCK, 2889 }; 2890 2891 /* 2892 * Call the Destroy secure page UVC on each page in the given array of PFNs. 2893 * Each page needs to have an extra reference, which will be released here. 2894 */ 2895 void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns) 2896 { 2897 struct folio *folio; 2898 unsigned long i; 2899 2900 for (i = 0; i < count; i++) { 2901 folio = pfn_folio(pfns[i]); 2902 /* we always have an extra reference */ 2903 uv_destroy_folio(folio); 2904 /* get rid of the extra reference */ 2905 folio_put(folio); 2906 cond_resched(); 2907 } 2908 } 2909 EXPORT_SYMBOL_GPL(s390_uv_destroy_pfns); 2910 2911 /** 2912 * __s390_uv_destroy_range - Call the destroy secure page UVC on each page 2913 * in the given range of the given address space. 2914 * @mm: the mm to operate on 2915 * @start: the start of the range 2916 * @end: the end of the range 2917 * @interruptible: if not 0, stop when a fatal signal is received 2918 * 2919 * Walk the given range of the given address space and call the destroy 2920 * secure page UVC on each page. Optionally exit early if a fatal signal is 2921 * pending. 2922 * 2923 * Return: 0 on success, -EINTR if the function stopped before completing 2924 */ 2925 int __s390_uv_destroy_range(struct mm_struct *mm, unsigned long start, 2926 unsigned long end, bool interruptible) 2927 { 2928 struct reset_walk_state state = { .next = start }; 2929 int r = 1; 2930 2931 while (r > 0) { 2932 state.count = 0; 2933 mmap_read_lock(mm); 2934 r = walk_page_range(mm, state.next, end, &gather_pages_ops, &state); 2935 mmap_read_unlock(mm); 2936 cond_resched(); 2937 s390_uv_destroy_pfns(state.count, state.pfns); 2938 if (interruptible && fatal_signal_pending(current)) 2939 return -EINTR; 2940 } 2941 return 0; 2942 } 2943 EXPORT_SYMBOL_GPL(__s390_uv_destroy_range); 2944 2945 /** 2946 * s390_unlist_old_asce - Remove the topmost level of page tables from the 2947 * list of page tables of the gmap. 2948 * @gmap: the gmap whose table is to be removed 2949 * 2950 * On s390x, KVM keeps a list of all pages containing the page tables of the 2951 * gmap (the CRST list). 
This list is used at tear down time to free all 2952 * pages that are now not needed anymore. 2953 * 2954 * This function removes the topmost page of the tree (the one pointed to by 2955 * the ASCE) from the CRST list. 2956 * 2957 * This means that it will not be freed when the VM is torn down, and needs 2958 * to be handled separately by the caller, unless a leak is actually 2959 * intended. Notice that this function will only remove the page from the 2960 * list, the page will still be used as a top level page table (and ASCE). 2961 */ 2962 void s390_unlist_old_asce(struct gmap *gmap) 2963 { 2964 struct page *old; 2965 2966 old = virt_to_page(gmap->table); 2967 spin_lock(&gmap->guest_table_lock); 2968 list_del(&old->lru); 2969 /* 2970 * Sometimes the topmost page might need to be "removed" multiple 2971 * times, for example if the VM is rebooted into secure mode several 2972 * times concurrently, or if s390_replace_asce fails after calling 2973 * s390_remove_old_asce and is attempted again later. In that case 2974 * the old asce has been removed from the list, and therefore it 2975 * will not be freed when the VM terminates, but the ASCE is still 2976 * in use and still pointed to. 2977 * A subsequent call to replace_asce will follow the pointer and try 2978 * to remove the same page from the list again. 2979 * Therefore it's necessary that the page of the ASCE has valid 2980 * pointers, so list_del can work (and do nothing) without 2981 * dereferencing stale or invalid pointers. 2982 */ 2983 INIT_LIST_HEAD(&old->lru); 2984 spin_unlock(&gmap->guest_table_lock); 2985 } 2986 EXPORT_SYMBOL_GPL(s390_unlist_old_asce); 2987 2988 /** 2989 * s390_replace_asce - Try to replace the current ASCE of a gmap with a copy 2990 * @gmap: the gmap whose ASCE needs to be replaced 2991 * 2992 * If the ASCE is a SEGMENT type then this function will return -EINVAL, 2993 * otherwise the pointers in the host_to_guest radix tree will keep pointing 2994 * to the wrong pages, causing use-after-free and memory corruption. 2995 * If the allocation of the new top level page table fails, the ASCE is not 2996 * replaced. 2997 * In any case, the old ASCE is always removed from the gmap CRST list. 2998 * Therefore the caller has to make sure to save a pointer to it 2999 * beforehand, unless a leak is actually intended. 3000 */ 3001 int s390_replace_asce(struct gmap *gmap) 3002 { 3003 unsigned long asce; 3004 struct page *page; 3005 void *table; 3006 3007 s390_unlist_old_asce(gmap); 3008 3009 /* Replacing segment type ASCEs would cause serious issues */ 3010 if ((gmap->asce & _ASCE_TYPE_MASK) == _ASCE_TYPE_SEGMENT) 3011 return -EINVAL; 3012 3013 page = gmap_alloc_crst(); 3014 if (!page) 3015 return -ENOMEM; 3016 page->index = 0; 3017 table = page_to_virt(page); 3018 memcpy(table, gmap->table, 1UL << (CRST_ALLOC_ORDER + PAGE_SHIFT)); 3019 3020 /* 3021 * The caller has to deal with the old ASCE, but here we make sure 3022 * the new one is properly added to the CRST list, so that 3023 * it will be freed when the VM is torn down. 3024 */ 3025 spin_lock(&gmap->guest_table_lock); 3026 list_add(&page->lru, &gmap->crst_list); 3027 spin_unlock(&gmap->guest_table_lock); 3028 3029 /* Set new table origin while preserving existing ASCE control bits */ 3030 asce = (gmap->asce & ~_ASCE_ORIGIN) | __pa(table); 3031 WRITE_ONCE(gmap->asce, asce); 3032 WRITE_ONCE(gmap->mm->context.gmap_asce, asce); 3033 WRITE_ONCE(gmap->table, table); 3034 3035 return 0; 3036 } 3037 EXPORT_SYMBOL_GPL(s390_replace_asce); 3038