/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

static int dax_is_pmd_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_PMD;
}

static int dax_is_pte_entry(void *entry)
{
	return !((unsigned long)entry & RADIX_DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_HZP;
}

static int dax_is_empty_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_EMPTY;
}
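
/*
 * Illustrative note, not part of the original file: the predicates above
 * test flag bits that are encoded into the exceptional radix tree entry
 * itself (see the RADIX_DAX_* definitions in include/linux/dax.h for the
 * authoritative layout).  As a rough example of how the pieces fit
 * together, the empty PMD-sized entry that grab_mapping_entry() below
 * builds via
 *
 *	entry = dax_radix_locked_entry(0, RADIX_DAX_PMD | RADIX_DAX_EMPTY);
 *
 * carries the lock bit, the PMD size flag and the "empty" flag but no
 * sector, so dax_is_pmd_entry() and dax_is_empty_entry() both return true
 * for it while dax_radix_sector() returns 0.
 */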

/*
 * DAX radix tree locking
 */
struct exceptional_entry_key {
	struct address_space *mapping;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};

static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
		pgoff_t index, void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1);

	key->mapping = mapping;
	key->entry_start = index;

	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}

static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mode,
				       int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->mapping != ewait->key.mapping ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * Check whether the given slot is locked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
	return entry & RADIX_DAX_ENTRY_LOCK;
}

/*
 * Mark the given slot as locked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry |= RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Mark the given slot as unlocked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Look up an entry in the radix tree, wait for it to become unlocked if it
 * is an exceptional entry and return it.  The caller must call
 * put_unlocked_mapping_entry() when it decided not to lock the entry or
 * put_locked_mapping_entry() when it locked the entry and now wants to
 * unlock it.
 *
 * The function must be called with mapping->tree_lock held.
 */
static void *get_unlocked_mapping_entry(struct address_space *mapping,
					pgoff_t index, void ***slotp)
{
	void *entry, **slot;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
					  &slot);
		if (!entry || !radix_tree_exceptional_entry(entry) ||
		    !slot_locked(mapping, slot)) {
			if (slotp)
				*slotp = slot;
			return entry;
		}

		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mapping->tree_lock);
		schedule();
		finish_wait(wq, &ewait.wait);
		spin_lock_irq(&mapping->tree_lock);
	}
}

static void dax_unlock_mapping_entry(struct address_space *mapping,
				     pgoff_t index)
{
	void *entry, **slot;

	spin_lock_irq(&mapping->tree_lock);
	entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
			 !slot_locked(mapping, slot))) {
		spin_unlock_irq(&mapping->tree_lock);
		return;
	}
	unlock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

static void put_locked_mapping_entry(struct address_space *mapping,
				     pgoff_t index, void *entry)
{
	if (!radix_tree_exceptional_entry(entry)) {
		unlock_page(entry);
		put_page(entry);
	} else {
		dax_unlock_mapping_entry(mapping, index);
	}
}

/*
 * Called when we are done with the radix tree entry we looked up via
 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 */
static void put_unlocked_mapping_entry(struct address_space *mapping,
				       pgoff_t index, void *entry)
{
	if (!radix_tree_exceptional_entry(entry))
		return;

	/* We have to wake up next waiter for the radix tree entry lock */
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}
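
/*
 * Illustrative note, not part of the original file: the usual calling
 * pattern for the lookup/lock helpers above is
 *
 *	spin_lock_irq(&mapping->tree_lock);
 *	entry = get_unlocked_mapping_entry(mapping, index, &slot);
 *	... decide whether to take the entry lock ...
 *	entry = lock_slot(mapping, slot);   // or put_unlocked_mapping_entry()
 *	spin_unlock_irq(&mapping->tree_lock);
 *	...
 *	put_locked_mapping_entry(mapping, index, entry);
 *
 * See dax_writeback_one() and dax_pfn_mkwrite() below for complete
 * examples of this pattern.
 */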

/*
 * Find radix tree entry at given index. If it points to a page, return with
 * the page locked. If it points to the exceptional entry, return with the
 * radix tree entry locked. If the radix tree doesn't contain given index,
 * create empty exceptional entry for the index and return with it locked.
 *
 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return an error.  This error will
 * happen if there are any 4k entries (either zero pages or DAX entries)
 * within the 2MiB range that we are requesting.
 *
 * We always favor 4k entries over 2MiB entries. There isn't a flow where we
 * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
 * insertion will fail if it finds any 4k entries already in the tree, and a
 * 4k insertion will cause an existing 2MiB entry to be unmapped and
 * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
 * well as 2MiB empty entries.
 *
 * The exception to this downgrade path is for 2MiB DAX PMD entries that have
 * real storage backing them.  We will leave these real 2MiB DAX entries in
 * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags.  For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 */
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
		unsigned long size_flag)
{
	bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
	void *entry, **slot;

restart:
	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);

	if (entry) {
		if (size_flag & RADIX_DAX_PMD) {
			if (!radix_tree_exceptional_entry(entry) ||
			    dax_is_pte_entry(entry)) {
				put_unlocked_mapping_entry(mapping, index,
						entry);
				entry = ERR_PTR(-EEXIST);
				goto out_unlock;
			}
		} else { /* trying to grab a PTE entry */
			if (radix_tree_exceptional_entry(entry) &&
			    dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	/* No entry for given index? Make sure radix tree is big enough. */
	if (!entry || pmd_downgrade) {
		int err;

		if (pmd_downgrade) {
			/*
			 * Make sure 'entry' remains valid while we drop
			 * mapping->tree_lock.
			 */
			entry = lock_slot(mapping, slot);
		}

		spin_unlock_irq(&mapping->tree_lock);
		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (pmd_downgrade && dax_is_zero_entry(entry))
			unmap_mapping_range(mapping,
				(index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);

		err = radix_tree_preload(
				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
		if (err) {
			if (pmd_downgrade)
				put_locked_mapping_entry(mapping, index, entry);
			return ERR_PTR(err);
		}
		spin_lock_irq(&mapping->tree_lock);

		if (!entry) {
			/*
			 * We needed to drop the page_tree lock while calling
			 * radix_tree_preload() and we didn't have an entry to
			 * lock.  See if another thread inserted an entry at
			 * our index during this time.
			 */
			entry = __radix_tree_lookup(&mapping->page_tree, index,
					NULL, &slot);
			if (entry) {
				radix_tree_preload_end();
				spin_unlock_irq(&mapping->tree_lock);
				goto restart;
			}
		}

		if (pmd_downgrade) {
			radix_tree_delete(&mapping->page_tree, index);
			mapping->nrexceptional--;
			dax_wake_mapping_entry_waiter(mapping, index, entry,
					true);
		}

		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);

		err = __radix_tree_insert(&mapping->page_tree, index,
				dax_radix_order(entry), entry);
		radix_tree_preload_end();
		if (err) {
			spin_unlock_irq(&mapping->tree_lock);
			/*
			 * Our insertion of a DAX entry failed, most likely
			 * because we were inserting a PMD entry and it
			 * collided with a PTE sized entry at a different
			 * index in the PMD range.  We haven't inserted
			 * anything into the radix tree and have no waiters to
			 * wake.
			 */
			return ERR_PTR(err);
		}
		/* Good, we have inserted empty locked entry into the tree. */
		mapping->nrexceptional++;
		spin_unlock_irq(&mapping->tree_lock);
		return entry;
	}
	/* Normal page in radix tree? */
	if (!radix_tree_exceptional_entry(entry)) {
		struct page *page = entry;

		get_page(page);
		spin_unlock_irq(&mapping->tree_lock);
		lock_page(page);
		/* Page got truncated? Retry... */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			put_page(page);
			goto restart;
		}
		return page;
	}
	entry = lock_slot(mapping, slot);
out_unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return entry;
}

/*
 * We do not necessarily hold the mapping->tree_lock when we call this
 * function so it is possible that 'entry' is no longer a valid item in the
 * radix tree.  This is okay because all we really need to do is to find the
 * correct waitqueue where tasks might be waiting for that old 'entry' and
 * wake them.
 */
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
		pgoff_t index, void *entry, bool wake_all)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(mapping, index, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under mapping->tree_lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}

static int __dax_invalidate_mapping_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	int ret = 0;
	void *entry;
	struct radix_tree_root *page_tree = &mapping->page_tree;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	if (!entry || !radix_tree_exceptional_entry(entry))
		goto out;
	if (!trunc &&
	    (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
	     radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
		goto out;
	radix_tree_delete(page_tree, index);
	mapping->nrexceptional--;
	ret = 1;
out:
	put_unlocked_mapping_entry(mapping, index, entry);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}

/*
 * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
 * entry to get unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_mapping_entry(mapping, index, true);

	/*
	 * This gets called from the truncate / punch_hole path. As such, the
	 * caller must hold locks protecting against concurrent modifications
	 * of the radix tree (usually fs-private i_mmap_sem for writing). Since
	 * the caller has seen an exceptional entry for this index, we better
	 * find it at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate exceptional DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_mapping_entry(mapping, index, false);
}

/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, void **entry,
			 struct vm_fault *vmf)
{
	struct inode *inode = mapping->host;
	struct page *page;
	int ret;

	/* Hole page already exists? Return it... */
	if (!radix_tree_exceptional_entry(*entry)) {
		page = *entry;
		goto finish_fault;
	}

	/* This will replace locked radix tree entry with a hole page */
	page = find_or_create_page(mapping, vmf->pgoff,
				   vmf->gfp_mask | __GFP_ZERO);
	if (!page) {
		ret = VM_FAULT_OOM;
		goto out;
	}

finish_fault:
	vmf->page = page;
	ret = finish_fault(vmf);
	vmf->page = NULL;
	*entry = page;
	if (!ret) {
		/* Grab reference for PTE that is now referencing the page */
		get_page(page);
		ret = VM_FAULT_NOPAGE;
	}
out:
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}

static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
		sector_t sector, size_t size, struct page *to,
		unsigned long vaddr)
{
	void *vto, *kaddr;
	pgoff_t pgoff;
	pfn_t pfn;
	long rc;
	int id;

	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_mapping_entry(struct address_space *mapping,
				      struct vm_fault *vmf,
				      void *entry, sector_t sector,
				      unsigned long flags)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	int error = 0;
	bool hole_fill = false;
	void *new_entry;
	pgoff_t index = vmf->pgoff;

	if (vmf->flags & FAULT_FLAG_WRITE)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	/* Replacing hole page with block mapping? */
	if (!radix_tree_exceptional_entry(entry)) {
		hole_fill = true;
		/*
		 * Unmap the page now before we remove it from page cache below.
		 * The page is locked so it cannot be faulted in again.
		 */
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
				    PAGE_SIZE, 0);
		error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM);
		if (error)
			return ERR_PTR(error);
	} else if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_HZP)) {
		/* replacing huge zero page with PMD block mapping */
		unmap_mapping_range(mapping,
			(vmf->pgoff << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
	}

	spin_lock_irq(&mapping->tree_lock);
	new_entry = dax_radix_locked_entry(sector, flags);

	if (hole_fill) {
		__delete_from_page_cache(entry, NULL);
		/* Drop pagecache reference */
		put_page(entry);
		error = __radix_tree_insert(page_tree, index,
				dax_radix_order(new_entry), new_entry);
		if (error) {
			new_entry = ERR_PTR(error);
			goto unlock;
		}
		mapping->nrexceptional++;
	} else if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		/*
		 * Only swap our new entry into the radix tree if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the tree, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		struct radix_tree_node *node;
		void **slot;
		void *ret;

		ret = __radix_tree_lookup(page_tree, index, &node, &slot);
		WARN_ON_ONCE(ret != entry);
		__radix_tree_replace(page_tree, node, slot,
				     new_entry, NULL, NULL);
	}
	if (vmf->flags & FAULT_FLAG_WRITE)
		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
unlock:
	spin_unlock_irq(&mapping->tree_lock);
	if (hole_fill) {
		radix_tree_preload_end();
		/*
		 * We don't need hole page anymore, it has been replaced with
		 * locked radix tree entry now.
		 */
		if (mapping->a_ops->freepage)
			mapping->a_ops->freepage(entry);
		unlock_page(entry);
		put_page(entry);
	}
	return new_entry;
}

static inline unsigned long
pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}

/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_mapping_entry_mkclean(struct address_space *mapping,
				      pgoff_t index, unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;
	bool changed;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		unsigned long address;

		cond_resched();

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);
		changed = false;
		if (follow_pte_pmd(vma->vm_mm, address, &ptep, &pmdp, &ptl))
			continue;

		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_huge_clear_flush(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
			changed = true;
unlock_pmd:
			spin_unlock(ptl);
#endif
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
			changed = true;
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		if (changed)
			mmu_notifier_invalidate_page(vma->vm_mm, address);
	}
	i_mmap_unlock_read(mapping);
}

static int dax_writeback_one(struct block_device *bdev,
		struct dax_device *dax_dev, struct address_space *mapping,
		pgoff_t index, void *entry)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	void *entry2, **slot, *kaddr;
	long ret = 0, id;
	sector_t sector;
	pgoff_t pgoff;
	size_t size;
	pfn_t pfn;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!radix_tree_exceptional_entry(entry)))
		return -EIO;

	spin_lock_irq(&mapping->tree_lock);
	entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
	/* Entry got punched out / reallocated? */
	if (!entry2 || !radix_tree_exceptional_entry(entry2))
		goto put_unlocked;
	/*
	 * Entry got reallocated elsewhere? No need to writeback. We have to
	 * compare sectors as we must not bail out due to difference in lockbit
	 * or entry type.
	 */
	if (dax_radix_sector(entry2) != dax_radix_sector(entry))
		goto put_unlocked;
	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
				dax_is_zero_entry(entry))) {
		ret = -EIO;
		goto put_unlocked;
	}

	/* Another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto put_unlocked;
	/* Lock the entry to serialize with page faults */
	entry = lock_slot(mapping, slot);
	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under tree_lock and once they do that they will
	 * see the entry locked and wait for it to unlock.
	 */
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
	spin_unlock_irq(&mapping->tree_lock);

	/*
	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we are given will be aligned to
	 * the start index of the PMD, as will the sector we pull from
	 * 'entry'.  This allows us to flush for PMD_SIZE and not have to
	 * worry about partial PMD writebacks.
	 */
	sector = dax_radix_sector(entry);
	size = PAGE_SIZE << dax_radix_order(entry);

	id = dax_read_lock();
	ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (ret)
		goto dax_unlock;

	/*
	 * dax_direct_access() may sleep, so cannot hold tree_lock over
	 * its invocation.
	 */
	ret = dax_direct_access(dax_dev, pgoff, size / PAGE_SIZE, &kaddr, &pfn);
	if (ret < 0)
		goto dax_unlock;

	if (WARN_ON_ONCE(ret < size / PAGE_SIZE)) {
		ret = -EIO;
		goto dax_unlock;
	}

	dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(pfn));
	wb_cache_pmem(kaddr, size);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	spin_lock_irq(&mapping->tree_lock);
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
	spin_unlock_irq(&mapping->tree_lock);
	trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT);
dax_unlock:
	dax_read_unlock(id);
	put_locked_mapping_entry(mapping, index, entry);
	return ret;

put_unlocked:
	put_unlocked_mapping_entry(mapping, index, entry2);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	pgoff_t start_index, end_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct dax_device *dax_dev;
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev)
		return -EIO;

	start_index = wbc->range_start >> PAGE_SHIFT;
	end_index = wbc->range_end >> PAGE_SHIFT;

	trace_dax_writeback_range(inode, start_index, end_index);

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec, 0);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(bdev, dax_dev, mapping,
					indices[i], pvec.pages[i]);
			if (ret < 0)
				goto out;
		}
		start_index = indices[pvec.nr - 1] + 1;
	}
out:
	put_dax(dax_dev);
	trace_dax_writeback_range_done(inode, start_index, end_index);
	return (ret < 0 ? ret : 0);
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
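
/*
 * Illustrative sketch, not part of the original file: filesystems typically
 * forward to dax_writeback_mapping_range() from their ->writepages method
 * when the mapping is in DAX mode, roughly along these lines (the choice of
 * block device is filesystem specific):
 *
 *	static int example_writepages(struct address_space *mapping,
 *				      struct writeback_control *wbc)
 *	{
 *		if (dax_mapping(mapping))
 *			return dax_writeback_mapping_range(mapping,
 *					mapping->host->i_sb->s_bdev, wbc);
 *		...
 *	}
 */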

static int dax_insert_mapping(struct address_space *mapping,
		struct block_device *bdev, struct dax_device *dax_dev,
		sector_t sector, size_t size, void **entryp,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long vaddr = vmf->address;
	void *entry = *entryp;
	void *ret, *kaddr;
	pgoff_t pgoff;
	int id, rc;
	pfn_t pfn;

	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	dax_read_unlock(id);

	ret = dax_insert_mapping_entry(mapping, vmf, entry, sector, 0);
	if (IS_ERR(ret))
		return PTR_ERR(ret);
	*entryp = ret;

	trace_dax_insert_mapping(mapping->host, vmf, ret);
	return vm_insert_mixed(vma, vaddr, pfn);
}

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	void *entry, **slot;
	pgoff_t index = vmf->pgoff;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);
	if (!entry || !radix_tree_exceptional_entry(entry)) {
		if (entry)
			put_unlocked_mapping_entry(mapping, index, entry);
		spin_unlock_irq(&mapping->tree_lock);
		trace_dax_pfn_mkwrite_no_entry(inode, vmf, VM_FAULT_NOPAGE);
		return VM_FAULT_NOPAGE;
	}
	radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
	entry = lock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	/*
	 * If we race with somebody updating the PTE and finish_mkwrite_fault()
	 * fails, we don't care. We need to return VM_FAULT_NOPAGE and retry
	 * the fault in either case.
	 */
	finish_mkwrite_fault(vmf);
	put_locked_mapping_entry(mapping, index, entry);
	trace_dax_pfn_mkwrite(inode, vmf, VM_FAULT_NOPAGE);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);

static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}

int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int size)
{
	if (dax_range_is_aligned(bdev, offset, size)) {
		sector_t start_sector = sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				size >> 9, GFP_NOFS, 0);
	} else {
		pgoff_t pgoff;
		long rc, id;
		void *kaddr;
		pfn_t pfn;

		rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
		if (rc)
			return rc;

		id = dax_read_lock();
		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr,
				&pfn);
		if (rc < 0) {
			dax_read_unlock(id);
			return rc;
		}
		clear_pmem(kaddr + offset, size);
		dax_read_unlock(id);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);

static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
}

static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct block_device *bdev = iomap->bdev;
	struct dax_device *dax_dev = iomap->dax_dev;
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;
	int id;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/*
	 * Write can allocate block for an area which has a hole page mapped
	 * into page tables. We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
	 */
	if (iomap->flags & IOMAP_F_NEW) {
		invalidate_inode_pages2_range(inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	id = dax_read_lock();
	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		const size_t size = ALIGN(length + offset, PAGE_SIZE);
		const sector_t sector = dax_iomap_sector(iomap, pos);
		ssize_t map_len;
		pgoff_t pgoff;
		void *kaddr;
		pfn_t pfn;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
		if (ret)
			break;

		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
				&kaddr, &pfn);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		map_len = PFN_PHYS(map_len);
		kaddr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		if (iov_iter_rw(iter) == WRITE)
			map_len = copy_from_iter_pmem(kaddr, map_len, iter);
		else
			map_len = copy_to_iter(kaddr, map_len, iter);
		if (map_len <= 0) {
			ret = map_len ? map_len : -EFAULT;
			break;
		}

		pos += map_len;
		length -= map_len;
		done += map_len;
	}
	dax_read_unlock(id);

	return done ? done : ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @iter: The addresses to do I/O from or to
 * @ops: iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_exclusive(&inode->i_rwsem);
		flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&inode->i_rwsem);
	}

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);
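
/*
 * Illustrative sketch, not part of the original file: a filesystem's
 * ->read_iter path might route DAX I/O here roughly as follows, with
 * "example_iomap_ops" standing in for the filesystem's own iomap_ops and
 * the locking matching the lockdep assertions above:
 *
 *	static ssize_t example_dax_read_iter(struct kiocb *iocb,
 *					     struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = dax_iomap_rw(iocb, to, &example_iomap_ops);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 */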

static int dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}

static int dax_iomap_pte_fault(struct vm_fault *vmf,
			       const struct iomap_ops *ops)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	sector_t sector;
	struct iomap iomap = { 0 };
	unsigned flags = IOMAP_FAULT;
	int error, major = 0;
	int vmf_ret = 0;
	void *entry;

	trace_dax_pte_fault(inode, vmf, vmf_ret);
	/*
	 * Check whether the offset isn't beyond the end of file now. The
	 * caller is supposed to hold locks serializing us with truncate /
	 * punch hole so this is a reliable test.
	 */
	if (pos >= i_size_read(inode)) {
		vmf_ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
		flags |= IOMAP_WRITE;

	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
	if (IS_ERR(entry)) {
		vmf_ret = dax_fault_return(PTR_ERR(entry));
		goto out;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PMD fault that overlaps with
	 * the PTE we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
		vmf_ret = VM_FAULT_NOPAGE;
		goto unlock_entry;
	}

	/*
	 * Note that we don't bother to use iomap_apply here: DAX requires
	 * the filesystem block size to be equal to the page size, which means
	 * that we never have to deal with more than a single extent here.
	 */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
	if (error) {
		vmf_ret = dax_fault_return(error);
		goto unlock_entry;
	}
	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
		error = -EIO;	/* fs corruption? */
		goto error_finish_iomap;
	}

	sector = dax_iomap_sector(&iomap, pos);

	if (vmf->cow_page) {
		switch (iomap.type) {
		case IOMAP_HOLE:
		case IOMAP_UNWRITTEN:
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
			error = copy_user_dax(iomap.bdev, iomap.dax_dev,
					sector, PAGE_SIZE, vmf->cow_page, vaddr);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		}

		if (error)
			goto error_finish_iomap;

		__SetPageUptodate(vmf->cow_page);
		vmf_ret = finish_fault(vmf);
		if (!vmf_ret)
			vmf_ret = VM_FAULT_DONE_COW;
		goto finish_iomap;
	}

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomap.flags & IOMAP_F_NEW) {
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vmf->vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
		}
		error = dax_insert_mapping(mapping, iomap.bdev, iomap.dax_dev,
				sector, PAGE_SIZE, &entry, vmf->vma, vmf);
		/* -EBUSY is fine, somebody else faulted on the same PTE */
		if (error == -EBUSY)
			error = 0;
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (!(vmf->flags & FAULT_FLAG_WRITE)) {
			vmf_ret = dax_load_hole(mapping, &entry, vmf);
			goto finish_iomap;
		}
		/*FALLTHRU*/
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

error_finish_iomap:
	vmf_ret = dax_fault_return(error) | major;
finish_iomap:
	if (ops->iomap_end) {
		int copied = PAGE_SIZE;

		if (vmf_ret & VM_FAULT_ERROR)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PTE we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
	}
unlock_entry:
	put_locked_mapping_entry(mapping, vmf->pgoff, entry);
out:
	trace_dax_pte_fault_done(inode, vmf, vmf_ret);
	return vmf_ret;
}

#ifdef CONFIG_FS_DAX_PMD
/*
 * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below functions.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)

static int dax_pmd_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
		loff_t pos, void **entryp)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	const sector_t sector = dax_iomap_sector(iomap, pos);
	struct dax_device *dax_dev = iomap->dax_dev;
	struct block_device *bdev = iomap->bdev;
	struct inode *inode = mapping->host;
	const size_t size = PMD_SIZE;
	void *ret = NULL, *kaddr;
	long length = 0;
	pgoff_t pgoff;
	pfn_t pfn;
	int id;

	if (bdev_dax_pgoff(bdev, sector, size, &pgoff) != 0)
		goto fallback;

	id = dax_read_lock();
	length = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
	if (length < 0)
		goto unlock_fallback;
	length = PFN_PHYS(length);

	if (length < size)
		goto unlock_fallback;
	if (pfn_t_to_pfn(pfn) & PG_PMD_COLOUR)
		goto unlock_fallback;
	if (!pfn_t_devmap(pfn))
		goto unlock_fallback;
	dax_read_unlock(id);

	ret = dax_insert_mapping_entry(mapping, vmf, *entryp, sector,
			RADIX_DAX_PMD);
	if (IS_ERR(ret))
		goto fallback;
	*entryp = ret;

	trace_dax_pmd_insert_mapping(inode, vmf, length, pfn, ret);
	return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
			pfn, vmf->flags & FAULT_FLAG_WRITE);

unlock_fallback:
	dax_read_unlock(id);
fallback:
	trace_dax_pmd_insert_mapping_fallback(inode, vmf, length, pfn, ret);
	return VM_FAULT_FALLBACK;
}

static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
		void **entryp)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct inode *inode = mapping->host;
	struct page *zero_page;
	void *ret = NULL;
	spinlock_t *ptl;
	pmd_t pmd_entry;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	ret = dax_insert_mapping_entry(mapping, vmf, *entryp, 0,
			RADIX_DAX_PMD | RADIX_DAX_HZP);
	if (IS_ERR(ret))
		goto fallback;
	*entryp = ret;

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		spin_unlock(ptl);
		goto fallback;
	}

	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
	return VM_FAULT_NOPAGE;

fallback:
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
	return VM_FAULT_FALLBACK;
}

static int dax_iomap_pmd_fault(struct vm_fault *vmf,
			       const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
	struct inode *inode = mapping->host;
	int result = VM_FAULT_FALLBACK;
	struct iomap iomap = { 0 };
	pgoff_t max_pgoff, pgoff;
	void *entry;
	loff_t pos;
	int error;

	/*
	 * Check whether offset isn't beyond end of file now.  Caller is
	 * supposed to hold locks serializing us with truncate / punch hole so
	 * this is a reliable test.
	 */
	pgoff = linear_page_index(vma, pmd_addr);
	max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		goto fallback;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		goto fallback;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		goto fallback;

	if (pgoff > max_pgoff) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}

	/* If the PMD would extend beyond the file size */
	if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
		goto fallback;

	/*
	 * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
	 * PMD or a HZP entry.  If it can't (because a 4k page is already in
	 * the tree, for instance), it will return -EEXIST and we just fall
	 * back to 4k entries.
	 */
	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
	if (IS_ERR(entry))
		goto fallback;

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PTE fault that overlaps with
	 * the PMD we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
			!pmd_devmap(*vmf->pmd)) {
		result = 0;
		goto unlock_entry;
	}

	/*
	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
	 * setting up a mapping, so really we're using iomap_begin() as a way
	 * to look up our filesystem block.
	 */
	pos = (loff_t)pgoff << PAGE_SHIFT;
	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
	if (error)
		goto unlock_entry;

	if (iomap.offset + iomap.length < pos + PMD_SIZE)
		goto finish_iomap;

	switch (iomap.type) {
	case IOMAP_MAPPED:
		result = dax_pmd_insert_mapping(vmf, &iomap, pos, &entry);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(write))
			break;
		result = dax_pmd_load_hole(vmf, &iomap, &entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

finish_iomap:
	if (ops->iomap_end) {
		int copied = PMD_SIZE;

		if (result == VM_FAULT_FALLBACK)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PMD we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
				&iomap);
	}
unlock_entry:
	put_locked_mapping_entry(mapping, pgoff, entry);
fallback:
	if (result == VM_FAULT_FALLBACK) {
		split_huge_pmd(vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
	return result;
}
#else
static int dax_iomap_pmd_fault(struct vm_fault *vmf,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */

/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vmf: The description of the fault
 * @pe_size: Size of the page to fault in
 * @ops: iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in
 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
 * has done all the necessary locking for the page fault to proceed
 * successfully.
 */
dax_iomap_fault() assumes the caller 1493 * has done all the necessary locking for page fault to proceed 1494 * successfully. 1495 */ 1496 int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, 1497 const struct iomap_ops *ops) 1498 { 1499 switch (pe_size) { 1500 case PE_SIZE_PTE: 1501 return dax_iomap_pte_fault(vmf, ops); 1502 case PE_SIZE_PMD: 1503 return dax_iomap_pmd_fault(vmf, ops); 1504 default: 1505 return VM_FAULT_FALLBACK; 1506 } 1507 } 1508 EXPORT_SYMBOL_GPL(dax_iomap_fault); 1509