/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

/* The 'colour' (ie low bits) within a PMD of a page offset. */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
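
/*
 * Illustrative note (not part of the original source): with 4k pages and
 * 2MiB PMDs (e.g. the x86-64 defaults), PG_PMD_COLOUR is 511, so
 * 'index & ~PG_PMD_COLOUR' rounds a page offset down to the first page of
 * its PMD and 'index | PG_PMD_COLOUR' gives the last page of that PMD, as
 * used by dax_entry_waitqueue() and dax_iomap_pmd_fault() below.
 */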

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

/*
 * We use the lowest available bit in an exceptional entry for locking, one
 * bit for the entry size (PMD) and two more to tell us if the entry is a
 * zero page or an empty entry that is just used for locking.  In total four
 * special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
 */
#define RADIX_DAX_SHIFT		(RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
#define RADIX_DAX_ENTRY_LOCK	(1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
#define RADIX_DAX_PMD		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
#define RADIX_DAX_ZERO_PAGE	(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
#define RADIX_DAX_EMPTY		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))

static unsigned long dax_radix_sector(void *entry)
{
	return (unsigned long)entry >> RADIX_DAX_SHIFT;
}

static void *dax_radix_locked_entry(sector_t sector, unsigned long flags)
{
	return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
			((unsigned long)sector << RADIX_DAX_SHIFT) |
			RADIX_DAX_ENTRY_LOCK);
}

static unsigned int dax_radix_order(void *entry)
{
	if ((unsigned long)entry & RADIX_DAX_PMD)
		return PMD_SHIFT - PAGE_SHIFT;
	return 0;
}

static int dax_is_pmd_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_PMD;
}

static int dax_is_pte_entry(void *entry)
{
	return !((unsigned long)entry & RADIX_DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_EMPTY;
}
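
/*
 * Illustrative sketch (not part of the original source): how the helpers
 * above compose and decompose an entry.  A locked, empty, PMD-sized entry
 * such as the one grab_mapping_entry() creates would be built as
 *
 *	void *entry = dax_radix_locked_entry(0, RADIX_DAX_PMD | RADIX_DAX_EMPTY);
 *
 * which sets RADIX_TREE_EXCEPTIONAL_ENTRY, RADIX_DAX_PMD, RADIX_DAX_EMPTY
 * and RADIX_DAX_ENTRY_LOCK in the low bits and stores sector 0 above
 * RADIX_DAX_SHIFT.  Decoding it then gives
 *
 *	dax_radix_sector(entry) == 0
 *	dax_radix_order(entry)  == PMD_SHIFT - PAGE_SHIFT
 *	dax_is_pmd_entry(entry) and dax_is_empty_entry(entry) are non-zero
 */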

/*
 * DAX radix tree locking
 */
struct exceptional_entry_key {
	struct address_space *mapping;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};

static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
		pgoff_t index, void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~PG_PMD_COLOUR;

	key->mapping = mapping;
	key->entry_start = index;

	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}

static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mode,
				       int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->mapping != ewait->key.mapping ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * We do not necessarily hold the mapping->tree_lock when we call this
 * function so it is possible that 'entry' is no longer a valid item in the
 * radix tree.  This is okay because all we really need to do is to find the
 * correct waitqueue where tasks might be waiting for that old 'entry' and
 * wake them.
 */
static void dax_wake_mapping_entry_waiter(struct address_space *mapping,
		pgoff_t index, void *entry, bool wake_all)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(mapping, index, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under mapping->tree_lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}

/*
 * Check whether the given slot is locked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
	return entry & RADIX_DAX_ENTRY_LOCK;
}

/*
 * Mark the given slot as locked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry |= RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Mark the given slot as unlocked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Look up an entry in the radix tree, wait for it to become unlocked if it
 * is an exceptional entry, and return it.  The caller must call
 * put_unlocked_mapping_entry() when they decide not to lock the entry, or
 * put_locked_mapping_entry() when they have locked the entry and want to
 * unlock it.
 *
 * The function must be called with mapping->tree_lock held.
 */
static void *get_unlocked_mapping_entry(struct address_space *mapping,
					pgoff_t index, void ***slotp)
{
	void *entry, **slot;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
					    &slot);
		if (!entry ||
		    WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)) ||
		    !slot_locked(mapping, slot)) {
			if (slotp)
				*slotp = slot;
			return entry;
		}

		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mapping->tree_lock);
		schedule();
		finish_wait(wq, &ewait.wait);
		spin_lock_irq(&mapping->tree_lock);
	}
}

static void dax_unlock_mapping_entry(struct address_space *mapping,
				     pgoff_t index)
{
	void *entry, **slot;

	spin_lock_irq(&mapping->tree_lock);
	entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
			 !slot_locked(mapping, slot))) {
		spin_unlock_irq(&mapping->tree_lock);
		return;
	}
	unlock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

static void put_locked_mapping_entry(struct address_space *mapping,
		pgoff_t index)
{
	dax_unlock_mapping_entry(mapping, index);
}

/*
 * Called when we are done with a radix tree entry we looked up via
 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 */
static void put_unlocked_mapping_entry(struct address_space *mapping,
				       pgoff_t index, void *entry)
{
	if (!entry)
		return;

	/* We have to wake up the next waiter for the radix tree entry lock */
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}
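
/*
 * Illustrative sketch (not part of the original source) of how the helpers
 * above pair up; it mirrors dax_writeback_one() and
 * __dax_invalidate_mapping_entry() below.  'should_lock' stands in for
 * whatever condition the caller checks and is not a real function:
 *
 *	spin_lock_irq(&mapping->tree_lock);
 *	entry = get_unlocked_mapping_entry(mapping, index, &slot);
 *	if (entry && should_lock(entry)) {
 *		entry = lock_slot(mapping, slot);
 *		spin_unlock_irq(&mapping->tree_lock);
 *		... work on the locked entry without tree_lock ...
 *		put_locked_mapping_entry(mapping, index);
 *	} else {
 *		put_unlocked_mapping_entry(mapping, index, entry);
 *		spin_unlock_irq(&mapping->tree_lock);
 *	}
 */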

/*
 * Find radix tree entry at given index. If it points to an exceptional entry,
 * return it with the radix tree entry locked. If the radix tree doesn't
 * contain given index, create an empty exceptional entry for the index and
 * return with it locked.
 *
 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return an error.  This error will
 * happen if there are any 4k entries within the 2MiB range that we are
 * requesting.
 *
 * We always favor 4k entries over 2MiB entries. There isn't a flow where we
 * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
 * insertion will fail if it finds any 4k entries already in the tree, and a
 * 4k insertion will cause an existing 2MiB entry to be unmapped and
 * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
 * well as 2MiB empty entries.
 *
 * The exception to this downgrade path is for 2MiB DAX PMD entries that have
 * real storage backing them.  We will leave these real 2MiB DAX entries in
 * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 */
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
		unsigned long size_flag)
{
	bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
	void *entry, **slot;

restart:
	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);

	if (WARN_ON_ONCE(entry && !radix_tree_exceptional_entry(entry))) {
		entry = ERR_PTR(-EIO);
		goto out_unlock;
	}

	if (entry) {
		if (size_flag & RADIX_DAX_PMD) {
			if (dax_is_pte_entry(entry)) {
				put_unlocked_mapping_entry(mapping, index,
						entry);
				entry = ERR_PTR(-EEXIST);
				goto out_unlock;
			}
		} else { /* trying to grab a PTE entry */
			if (dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	/* No entry for given index? Make sure radix tree is big enough. */
	if (!entry || pmd_downgrade) {
		int err;

		if (pmd_downgrade) {
			/*
			 * Make sure 'entry' remains valid while we drop
			 * mapping->tree_lock.
			 */
			entry = lock_slot(mapping, slot);
		}

		spin_unlock_irq(&mapping->tree_lock);
		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded is empty entries, which don't need to be
		 * unmapped.
		 */
		if (pmd_downgrade && dax_is_zero_entry(entry))
			unmap_mapping_range(mapping,
				(index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);

		err = radix_tree_preload(
				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
		if (err) {
			if (pmd_downgrade)
				put_locked_mapping_entry(mapping, index);
			return ERR_PTR(err);
		}
		spin_lock_irq(&mapping->tree_lock);

		if (!entry) {
			/*
			 * We needed to drop the page_tree lock while calling
			 * radix_tree_preload() and we didn't have an entry to
			 * lock.  See if another thread inserted an entry at
			 * our index during this time.
			 */
			entry = __radix_tree_lookup(&mapping->page_tree, index,
					NULL, &slot);
			if (entry) {
				radix_tree_preload_end();
				spin_unlock_irq(&mapping->tree_lock);
				goto restart;
			}
		}

		if (pmd_downgrade) {
			radix_tree_delete(&mapping->page_tree, index);
			mapping->nrexceptional--;
			dax_wake_mapping_entry_waiter(mapping, index, entry,
					true);
		}

		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);

		err = __radix_tree_insert(&mapping->page_tree, index,
				dax_radix_order(entry), entry);
		radix_tree_preload_end();
		if (err) {
			spin_unlock_irq(&mapping->tree_lock);
			/*
			 * Our insertion of a DAX entry failed, most likely
			 * because we were inserting a PMD entry and it
			 * collided with a PTE sized entry at a different
			 * index in the PMD range.  We haven't inserted
			 * anything into the radix tree and have no waiters to
			 * wake.
			 */
			return ERR_PTR(err);
		}
		/* Good, we have inserted an empty locked entry into the tree. */
		mapping->nrexceptional++;
		spin_unlock_irq(&mapping->tree_lock);
		return entry;
	}
	entry = lock_slot(mapping, slot);
 out_unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return entry;
}
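
/*
 * Illustrative sketch (not part of the original source): the fault handlers
 * in this file pair grab_mapping_entry() with put_locked_mapping_entry(),
 * roughly as
 *
 *	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
 *	if (IS_ERR(entry))
 *		return dax_fault_return(PTR_ERR(entry));
 *	... install the PTE while the entry is held locked ...
 *	put_locked_mapping_entry(mapping, vmf->pgoff);
 *
 * Passing RADIX_DAX_PMD instead of 0 requests a 2MiB entry and may fail with
 * -EEXIST if conflicting 4k entries already exist, as described above.
 */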

static int __dax_invalidate_mapping_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	int ret = 0;
	void *entry;
	struct radix_tree_root *page_tree = &mapping->page_tree;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	if (!entry || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)))
		goto out;
	if (!trunc &&
	    (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
	     radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
		goto out;
	radix_tree_delete(page_tree, index);
	mapping->nrexceptional--;
	ret = 1;
out:
	put_unlocked_mapping_entry(mapping, index, entry);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}

/*
 * Delete the exceptional DAX entry at @index from @mapping.  Wait for the
 * radix tree entry to get unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_mapping_entry(mapping, index, true);

	/*
	 * This gets called from the truncate / punch_hole path. As such, the
	 * caller must hold locks protecting against concurrent modifications
	 * of the radix tree (usually fs-private i_mmap_sem for writing).
	 * Since the caller has seen an exceptional entry for this index, we
	 * better find it at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate the exceptional DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_mapping_entry(mapping, index, false);
}

static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
		sector_t sector, size_t size, struct page *to,
		unsigned long vaddr)
{
	void *vto, *kaddr;
	pgoff_t pgoff;
	pfn_t pfn;
	long rc;
	int id;

	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_mapping_entry(struct address_space *mapping,
				      struct vm_fault *vmf,
				      void *entry, sector_t sector,
				      unsigned long flags, bool dirty)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	void *new_entry;
	pgoff_t index = vmf->pgoff;

	if (dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) {
		/* we are replacing a zero page with block mapping */
		if (dax_is_pmd_entry(entry))
			unmap_mapping_range(mapping,
					(vmf->pgoff << PAGE_SHIFT) & PMD_MASK,
					PMD_SIZE, 0);
		else /* pte entry */
			unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
					PAGE_SIZE, 0);
	}

	spin_lock_irq(&mapping->tree_lock);
	new_entry = dax_radix_locked_entry(sector, flags);

	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		/*
		 * Only swap our new entry into the radix tree if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the tree, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		struct radix_tree_node *node;
		void **slot;
		void *ret;

		ret = __radix_tree_lookup(page_tree, index, &node, &slot);
		WARN_ON_ONCE(ret != entry);
		__radix_tree_replace(page_tree, node, slot,
				     new_entry, NULL);
		entry = new_entry;
	}

	if (dirty)
		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);

	spin_unlock_irq(&mapping->tree_lock);
	return entry;
}

static inline unsigned long
pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}

/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_mapping_entry_mkclean(struct address_space *mapping,
				      pgoff_t index, unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		unsigned long address, start, end;

		cond_resched();

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);

		/*
		 * Note because we provide start/end to follow_pte_pmd it will
		 * call mmu_notifier_invalidate_range_start() on our behalf
		 * before taking any lock.
		 */
		if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
			continue;

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection not changing it to point
		 * to a new page.
		 *
		 * See Documentation/vm/mmu_notifier.txt
		 */
		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp)
					&& !pmd_access_permitted(*pmdp, WRITE))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_huge_clear_flush(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
unlock_pmd:
			spin_unlock(ptl);
#endif
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
	}
	i_mmap_unlock_read(mapping);
}

static int dax_writeback_one(struct block_device *bdev,
		struct dax_device *dax_dev, struct address_space *mapping,
		pgoff_t index, void *entry)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	void *entry2, **slot, *kaddr;
	long ret = 0, id;
	sector_t sector;
	pgoff_t pgoff;
	size_t size;
	pfn_t pfn;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!radix_tree_exceptional_entry(entry)))
		return -EIO;

	spin_lock_irq(&mapping->tree_lock);
	entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
	/* Entry got punched out / reallocated? */
	if (!entry2 || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry2)))
		goto put_unlocked;
	/*
	 * Entry got reallocated elsewhere? No need to writeback. We have to
	 * compare sectors as we must not bail out due to difference in lockbit
	 * or entry type.
	 */
	if (dax_radix_sector(entry2) != dax_radix_sector(entry))
		goto put_unlocked;
	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
				dax_is_zero_entry(entry))) {
		ret = -EIO;
		goto put_unlocked;
	}

	/* Another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto put_unlocked;
	/* Lock the entry to serialize with page faults */
	entry = lock_slot(mapping, slot);
	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under tree_lock and once they do that they will
	 * see the entry locked and wait for it to unlock.
	 */
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
	spin_unlock_irq(&mapping->tree_lock);

	/*
	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we are given will be aligned to
	 * the start index of the PMD, as will the sector we pull from
	 * 'entry'.  This allows us to flush for PMD_SIZE and not have to
	 * worry about partial PMD writebacks.
	 */
	sector = dax_radix_sector(entry);
	size = PAGE_SIZE << dax_radix_order(entry);

	id = dax_read_lock();
	ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (ret)
		goto dax_unlock;

	/*
	 * dax_direct_access() may sleep, so cannot hold tree_lock over
	 * its invocation.
	 */
	ret = dax_direct_access(dax_dev, pgoff, size / PAGE_SIZE, &kaddr, &pfn);
	if (ret < 0)
		goto dax_unlock;

	if (WARN_ON_ONCE(ret < size / PAGE_SIZE)) {
		ret = -EIO;
		goto dax_unlock;
	}

	dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(pfn));
	dax_flush(dax_dev, kaddr, size);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	spin_lock_irq(&mapping->tree_lock);
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
	spin_unlock_irq(&mapping->tree_lock);
	trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT);
 dax_unlock:
	dax_read_unlock(id);
	put_locked_mapping_entry(mapping, index);
	return ret;

 put_unlocked:
	put_unlocked_mapping_entry(mapping, index, entry2);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	pgoff_t start_index, end_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct dax_device *dax_dev;
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev)
		return -EIO;

	start_index = wbc->range_start >> PAGE_SHIFT;
	end_index = wbc->range_end >> PAGE_SHIFT;

	trace_dax_writeback_range(inode, start_index, end_index);

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(bdev, dax_dev, mapping,
					indices[i], pvec.pages[i]);
			if (ret < 0) {
				mapping_set_error(mapping, ret);
				goto out;
			}
		}
		start_index = indices[pvec.nr - 1] + 1;
	}
out:
	put_dax(dax_dev);
	trace_dax_writeback_range_done(inode, start_index, end_index);
	return (ret < 0 ? ret : 0);
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
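
/*
 * Illustrative sketch (not part of the original source): a filesystem
 * typically calls dax_writeback_mapping_range() from its ->writepages()
 * handler for DAX inodes, along the lines of
 *
 *	static int example_dax_writepages(struct address_space *mapping,
 *					  struct writeback_control *wbc)
 *	{
 *		return dax_writeback_mapping_range(mapping,
 *				mapping->host->i_sb->s_bdev, wbc);
 *	}
 *
 * 'example_dax_writepages' is a made-up name, and the block device argument
 * is whatever device backs the inode in the calling filesystem.
 */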

static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
}

static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
			 pfn_t *pfnp)
{
	const sector_t sector = dax_iomap_sector(iomap, pos);
	pgoff_t pgoff;
	void *kaddr;
	int id, rc;
	long length;

	rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
	if (rc)
		return rc;
	id = dax_read_lock();
	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
				   &kaddr, pfnp);
	if (length < 0) {
		rc = length;
		goto out;
	}
	rc = -EINVAL;
	if (PFN_PHYS(length) < size)
		goto out;
	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
		goto out;
	/* For larger pages we need devmap */
	if (length > 1 && !pfn_t_devmap(*pfnp))
		goto out;
	rc = 0;
out:
	dax_read_unlock(id);
	return rc;
}

/*
 * The user has performed a load from a hole in the file.  Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
 */
static int dax_load_hole(struct address_space *mapping, void *entry,
			 struct vm_fault *vmf)
{
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	int ret = VM_FAULT_NOPAGE;
	struct page *zero_page;
	void *entry2;

	zero_page = ZERO_PAGE(0);
	if (unlikely(!zero_page)) {
		ret = VM_FAULT_OOM;
		goto out;
	}

	entry2 = dax_insert_mapping_entry(mapping, vmf, entry, 0,
			RADIX_DAX_ZERO_PAGE, false);
	if (IS_ERR(entry2)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	vm_insert_mixed(vmf->vma, vaddr, page_to_pfn_t(zero_page));
out:
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}

static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}

int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int size)
{
	if (dax_range_is_aligned(bdev, offset, size)) {
		sector_t start_sector = sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				size >> 9, GFP_NOFS, 0);
	} else {
		pgoff_t pgoff;
		long rc, id;
		void *kaddr;
		pfn_t pfn;

		rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
		if (rc)
			return rc;

		id = dax_read_lock();
		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr,
				&pfn);
		if (rc < 0) {
			dax_read_unlock(id);
			return rc;
		}
		memset(kaddr + offset, 0, size);
		dax_flush(dax_dev, kaddr + offset, size);
		dax_read_unlock(id);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);
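
/*
 * Illustrative example (not part of the original source): with a 512-byte
 * logical block size, zeroing offset 0, size 4096 within a page is sector
 * aligned and is handed to blkdev_issue_zeroout(), while zeroing offset 100,
 * size 300 is not and falls back to dax_direct_access() followed by memset()
 * and dax_flush() of just those bytes.
 */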

static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct block_device *bdev = iomap->bdev;
	struct dax_device *dax_dev = iomap->dax_dev;
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;
	int id;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/*
	 * Write can allocate block for an area which has a hole page mapped
	 * into page tables. We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
	 */
	if (iomap->flags & IOMAP_F_NEW) {
		invalidate_inode_pages2_range(inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	id = dax_read_lock();
	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		const size_t size = ALIGN(length + offset, PAGE_SIZE);
		const sector_t sector = dax_iomap_sector(iomap, pos);
		ssize_t map_len;
		pgoff_t pgoff;
		void *kaddr;
		pfn_t pfn;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
		if (ret)
			break;

		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
				&kaddr, &pfn);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		map_len = PFN_PHYS(map_len);
		kaddr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		/*
		 * The userspace address for the memory copy has already been
		 * validated via access_ok() in either vfs_read() or
		 * vfs_write(), depending on which operation we are doing.
		 */
		if (iov_iter_rw(iter) == WRITE)
			map_len = dax_copy_from_iter(dax_dev, pgoff, kaddr,
					map_len, iter);
		else
			map_len = copy_to_iter(kaddr, map_len, iter);
		if (map_len <= 0) {
			ret = map_len ? map_len : -EFAULT;
			break;
		}

		pos += map_len;
		length -= map_len;
		done += map_len;
	}
	dax_read_unlock(id);

	return done ? done : ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:	The control block for this I/O
 * @iter:	The addresses to do I/O from or to
 * @ops:	iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_exclusive(&inode->i_rwsem);
		flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&inode->i_rwsem);
	}

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);
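
/*
 * Illustrative sketch (not part of the original source): a filesystem read
 * path built on dax_iomap_rw() might look like
 *
 *	static ssize_t example_dax_read_iter(struct kiocb *iocb,
 *					     struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = dax_iomap_rw(iocb, to, &example_iomap_ops);
 *		inode_unlock_shared(inode);
 *		file_accessed(iocb->ki_filp);
 *		return ret;
 *	}
 *
 * 'example_dax_read_iter' and 'example_iomap_ops' are made-up names; the
 * inode lock shown here is one way to satisfy the exclusion requirement
 * documented above.
 */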

static int dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}

/*
 * MAP_SYNC on a dax mapping guarantees dirty metadata is
 * flushed on write-faults (non-cow), but not read-faults.
 */
static bool dax_fault_is_synchronous(unsigned long flags,
		struct vm_area_struct *vma, struct iomap *iomap)
{
	return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
		&& (iomap->flags & IOMAP_F_DIRTY);
}

static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	struct iomap iomap = { 0 };
	unsigned flags = IOMAP_FAULT;
	int error, major = 0;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	int vmf_ret = 0;
	void *entry;
	pfn_t pfn;

	trace_dax_pte_fault(inode, vmf, vmf_ret);
	/*
	 * Check whether the offset isn't beyond the end of the file now.
	 * The caller is supposed to hold locks serializing us with truncate /
	 * punch hole, so this is a reliable test.
	 */
	if (pos >= i_size_read(inode)) {
		vmf_ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if (write && !vmf->cow_page)
		flags |= IOMAP_WRITE;

	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
	if (IS_ERR(entry)) {
		vmf_ret = dax_fault_return(PTR_ERR(entry));
		goto out;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PMD fault that overlaps with
	 * the PTE we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
		vmf_ret = VM_FAULT_NOPAGE;
		goto unlock_entry;
	}

	/*
	 * Note that we don't bother to use iomap_apply here: DAX requires
	 * the filesystem block size to be equal to the page size, which means
	 * that we never have to deal with more than a single extent here.
	 */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
	if (error) {
		vmf_ret = dax_fault_return(error);
		goto unlock_entry;
	}
	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
		error = -EIO;	/* fs corruption? */
		goto error_finish_iomap;
	}

	if (vmf->cow_page) {
		sector_t sector = dax_iomap_sector(&iomap, pos);

		switch (iomap.type) {
		case IOMAP_HOLE:
		case IOMAP_UNWRITTEN:
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
			error = copy_user_dax(iomap.bdev, iomap.dax_dev,
					sector, PAGE_SIZE, vmf->cow_page, vaddr);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		}

		if (error)
			goto error_finish_iomap;

		__SetPageUptodate(vmf->cow_page);
		vmf_ret = finish_fault(vmf);
		if (!vmf_ret)
			vmf_ret = VM_FAULT_DONE_COW;
		goto finish_iomap;
	}

	sync = dax_fault_is_synchronous(flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomap.flags & IOMAP_F_NEW) {
			count_vm_event(PGMAJFAULT);
			count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
		}
		error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
		if (error < 0)
			goto error_finish_iomap;

		entry = dax_insert_mapping_entry(mapping, vmf, entry,
						 dax_iomap_sector(&iomap, pos),
						 0, write && !sync);
		if (IS_ERR(entry)) {
			error = PTR_ERR(entry);
			goto error_finish_iomap;
		}

		/*
		 * If we are doing synchronous page fault and inode needs fsync,
		 * we can insert PTE into page tables only after that happens.
		 * Skip insertion for now and return the pfn so that caller can
		 * insert it after fsync is done.
		 */
		if (sync) {
			if (WARN_ON_ONCE(!pfnp)) {
				error = -EIO;
				goto error_finish_iomap;
			}
			*pfnp = pfn;
			vmf_ret = VM_FAULT_NEEDDSYNC | major;
			goto finish_iomap;
		}
		trace_dax_insert_mapping(inode, vmf, entry);
		if (write)
			error = vm_insert_mixed_mkwrite(vma, vaddr, pfn);
		else
			error = vm_insert_mixed(vma, vaddr, pfn);

		/* -EBUSY is fine, somebody else faulted on the same PTE */
		if (error == -EBUSY)
			error = 0;
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (!write) {
			vmf_ret = dax_load_hole(mapping, entry, vmf);
			goto finish_iomap;
		}
		/*FALLTHRU*/
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

 error_finish_iomap:
	vmf_ret = dax_fault_return(error) | major;
 finish_iomap:
	if (ops->iomap_end) {
		int copied = PAGE_SIZE;

		if (vmf_ret & VM_FAULT_ERROR)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PTE we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
	}
 unlock_entry:
	put_locked_mapping_entry(mapping, vmf->pgoff);
 out:
	trace_dax_pte_fault_done(inode, vmf, vmf_ret);
	return vmf_ret;
}

#ifdef CONFIG_FS_DAX_PMD
/*
 * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below functions.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)

static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
		void *entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct inode *inode = mapping->host;
	struct page *zero_page;
	void *ret = NULL;
	spinlock_t *ptl;
	pmd_t pmd_entry;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	ret = dax_insert_mapping_entry(mapping, vmf, entry, 0,
			RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE, false);
	if (IS_ERR(ret))
		goto fallback;

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		spin_unlock(ptl);
		goto fallback;
	}

	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
	return VM_FAULT_NOPAGE;

fallback:
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
	return VM_FAULT_FALLBACK;
}
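
/*
 * Illustrative example (not part of the original source): the colour check
 * in dax_iomap_pmd_fault() below requires the faulting address and the file
 * offset to sit at the same position within their PMDs.  With 4k pages and
 * 2MiB PMDs (PG_PMD_COLOUR == 511), a fault at a 2MiB-aligned address for
 * file offset pgoff == 512 has colour 0 on both sides and may use a PMD,
 * while the same address backed by pgoff == 513 has colours 0 and 1 and
 * falls back to PTEs.
 */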

static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
	struct inode *inode = mapping->host;
	int result = VM_FAULT_FALLBACK;
	struct iomap iomap = { 0 };
	pgoff_t max_pgoff, pgoff;
	void *entry;
	loff_t pos;
	int error;
	pfn_t pfn;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is
	 * supposed to hold locks serializing us with truncate / punch hole so
	 * this is a reliable test.
	 */
	pgoff = linear_page_index(vma, pmd_addr);
	max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);

	/*
	 * Make sure that the faulting address's PMD offset (color) matches
	 * the PMD offset from the start of the file.  This is necessary so
	 * that a PMD range in the page table overlaps exactly with a PMD
	 * range in the radix tree.
	 */
	if ((vmf->pgoff & PG_PMD_COLOUR) !=
	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
		goto fallback;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		goto fallback;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		goto fallback;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		goto fallback;

	if (pgoff >= max_pgoff) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}

	/* If the PMD would extend beyond the file size */
	if ((pgoff | PG_PMD_COLOUR) >= max_pgoff)
		goto fallback;

	/*
	 * grab_mapping_entry() will make sure we get a 2MiB empty entry, a
	 * 2MiB zero page entry or a DAX PMD.  If it can't (because a 4k page
	 * is already in the tree, for instance), it will return -EEXIST and
	 * we just fall back to 4k entries.
	 */
	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
	if (IS_ERR(entry))
		goto fallback;

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PTE fault that overlaps with
	 * the PMD we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
			!pmd_devmap(*vmf->pmd)) {
		result = 0;
		goto unlock_entry;
	}

	/*
	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
	 * setting up a mapping, so really we're using iomap_begin() as a way
	 * to look up our filesystem block.
	 */
	pos = (loff_t)pgoff << PAGE_SHIFT;
	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
	if (error)
		goto unlock_entry;

	if (iomap.offset + iomap.length < pos + PMD_SIZE)
		goto finish_iomap;

	sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
		if (error < 0)
			goto finish_iomap;

		entry = dax_insert_mapping_entry(mapping, vmf, entry,
						dax_iomap_sector(&iomap, pos),
						RADIX_DAX_PMD, write && !sync);
		if (IS_ERR(entry))
			goto finish_iomap;

		/*
		 * If we are doing synchronous page fault and inode needs fsync,
		 * we can insert PMD into page tables only after that happens.
		 * Skip insertion for now and return the pfn so that caller can
		 * insert it after fsync is done.
		 */
		if (sync) {
			if (WARN_ON_ONCE(!pfnp))
				goto finish_iomap;
			*pfnp = pfn;
			result = VM_FAULT_NEEDDSYNC;
			goto finish_iomap;
		}

		trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
		result = vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn,
					    write);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(write))
			break;
		result = dax_pmd_load_hole(vmf, &iomap, entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

 finish_iomap:
	if (ops->iomap_end) {
		int copied = PMD_SIZE;

		if (result == VM_FAULT_FALLBACK)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PMD we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
				&iomap);
	}
 unlock_entry:
	put_locked_mapping_entry(mapping, pgoff);
 fallback:
	if (result == VM_FAULT_FALLBACK) {
		split_huge_pmd(vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
	return result;
}
#else
static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */
dax_iomap_fault() assumes the caller 1496 * has done all the necessary locking for page fault to proceed 1497 * successfully. 1498 */ 1499 int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, 1500 pfn_t *pfnp, const struct iomap_ops *ops) 1501 { 1502 switch (pe_size) { 1503 case PE_SIZE_PTE: 1504 return dax_iomap_pte_fault(vmf, pfnp, ops); 1505 case PE_SIZE_PMD: 1506 return dax_iomap_pmd_fault(vmf, pfnp, ops); 1507 default: 1508 return VM_FAULT_FALLBACK; 1509 } 1510 } 1511 EXPORT_SYMBOL_GPL(dax_iomap_fault); 1512 1513 /** 1514 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables 1515 * @vmf: The description of the fault 1516 * @pe_size: Size of entry to be inserted 1517 * @pfn: PFN to insert 1518 * 1519 * This function inserts writeable PTE or PMD entry into page tables for mmaped 1520 * DAX file. It takes care of marking corresponding radix tree entry as dirty 1521 * as well. 1522 */ 1523 static int dax_insert_pfn_mkwrite(struct vm_fault *vmf, 1524 enum page_entry_size pe_size, 1525 pfn_t pfn) 1526 { 1527 struct address_space *mapping = vmf->vma->vm_file->f_mapping; 1528 void *entry, **slot; 1529 pgoff_t index = vmf->pgoff; 1530 int vmf_ret, error; 1531 1532 spin_lock_irq(&mapping->tree_lock); 1533 entry = get_unlocked_mapping_entry(mapping, index, &slot); 1534 /* Did we race with someone splitting entry or so? */ 1535 if (!entry || 1536 (pe_size == PE_SIZE_PTE && !dax_is_pte_entry(entry)) || 1537 (pe_size == PE_SIZE_PMD && !dax_is_pmd_entry(entry))) { 1538 put_unlocked_mapping_entry(mapping, index, entry); 1539 spin_unlock_irq(&mapping->tree_lock); 1540 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf, 1541 VM_FAULT_NOPAGE); 1542 return VM_FAULT_NOPAGE; 1543 } 1544 radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY); 1545 entry = lock_slot(mapping, slot); 1546 spin_unlock_irq(&mapping->tree_lock); 1547 switch (pe_size) { 1548 case PE_SIZE_PTE: 1549 error = vm_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn); 1550 vmf_ret = dax_fault_return(error); 1551 break; 1552 #ifdef CONFIG_FS_DAX_PMD 1553 case PE_SIZE_PMD: 1554 vmf_ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, 1555 pfn, true); 1556 break; 1557 #endif 1558 default: 1559 vmf_ret = VM_FAULT_FALLBACK; 1560 } 1561 put_locked_mapping_entry(mapping, index); 1562 trace_dax_insert_pfn_mkwrite(mapping->host, vmf, vmf_ret); 1563 return vmf_ret; 1564 } 1565 1566 /** 1567 * dax_finish_sync_fault - finish synchronous page fault 1568 * @vmf: The description of the fault 1569 * @pe_size: Size of entry to be inserted 1570 * @pfn: PFN to insert 1571 * 1572 * This function ensures that the file range touched by the page fault is 1573 * stored persistently on the media and handles inserting of appropriate page 1574 * table entry. 1575 */ 1576 int dax_finish_sync_fault(struct vm_fault *vmf, enum page_entry_size pe_size, 1577 pfn_t pfn) 1578 { 1579 int err; 1580 loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT; 1581 size_t len = 0; 1582 1583 if (pe_size == PE_SIZE_PTE) 1584 len = PAGE_SIZE; 1585 else if (pe_size == PE_SIZE_PMD) 1586 len = PMD_SIZE; 1587 else 1588 WARN_ON_ONCE(1); 1589 err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1); 1590 if (err) 1591 return VM_FAULT_SIGBUS; 1592 return dax_insert_pfn_mkwrite(vmf, pe_size, pfn); 1593 } 1594 EXPORT_SYMBOL_GPL(dax_finish_sync_fault); 1595