/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

/* The 'colour' (ie low bits) within a PMD of a page offset. */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

/*
 * We use the lowest available bit in an exceptional entry for locking, one
 * bit for the entry size (PMD) and two more to tell us if the entry is a
 * zero page or an empty entry that is just used for locking.  In total four
 * special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the
 * ZERO_PAGE and EMPTY bits aren't set the entry is a normal DAX entry with
 * a filesystem block allocation.
 */
#define RADIX_DAX_SHIFT		(RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
#define RADIX_DAX_ENTRY_LOCK	(1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
#define RADIX_DAX_PMD		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
#define RADIX_DAX_ZERO_PAGE	(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
#define RADIX_DAX_EMPTY		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))

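/*
 * Helpers that pack and unpack DAX radix tree entries.  The sector lives
 * in the bits above RADIX_DAX_SHIFT; below it sit the exceptional-entry
 * marker, the lock bit and the PMD / ZERO_PAGE / EMPTY flags described
 * above.
 */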
static unsigned long dax_radix_sector(void *entry)
{
	return (unsigned long)entry >> RADIX_DAX_SHIFT;
}

static void *dax_radix_locked_entry(sector_t sector, unsigned long flags)
{
	return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
			((unsigned long)sector << RADIX_DAX_SHIFT) |
			RADIX_DAX_ENTRY_LOCK);
}

static unsigned int dax_radix_order(void *entry)
{
	if ((unsigned long)entry & RADIX_DAX_PMD)
		return PMD_SHIFT - PAGE_SHIFT;
	return 0;
}

static int dax_is_pmd_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_PMD;
}

static int dax_is_pte_entry(void *entry)
{
	return !((unsigned long)entry & RADIX_DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_EMPTY;
}

/*
 * DAX radix tree locking
 */
struct exceptional_entry_key {
	struct address_space *mapping;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};

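/*
 * Map a mapping/index pair to its hashed wait queue and fill in @key so
 * that wake_exceptional_entry_func() can filter out wakeups meant for
 * other entries hashed to the same queue.
 */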
static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
		pgoff_t index, void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~PG_PMD_COLOUR;

	key->mapping = mapping;
	key->entry_start = index;

	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}

static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
		unsigned int mode, int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->mapping != ewait->key.mapping ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * We do not necessarily hold the mapping->tree_lock when we call this
 * function so it is possible that 'entry' is no longer a valid item in the
 * radix tree.  This is okay because all we really need to do is to find the
 * correct waitqueue where tasks might be waiting for that old 'entry' and
 * wake them.
 */
static void dax_wake_mapping_entry_waiter(struct address_space *mapping,
		pgoff_t index, void *entry, bool wake_all)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(mapping, index, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under mapping->tree_lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}

/*
 * Check whether the given slot is locked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
	return entry & RADIX_DAX_ENTRY_LOCK;
}

/*
 * Mark the given slot as locked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry |= RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Mark the given slot as unlocked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Look up an entry in the radix tree, wait for it to become unlocked if it
 * is an exceptional entry and return it.  The caller must call
 * put_unlocked_mapping_entry() when deciding not to lock the entry, or
 * put_locked_mapping_entry() after locking the entry and being done with it.
 *
 * The function must be called with mapping->tree_lock held.
 */
static void *get_unlocked_mapping_entry(struct address_space *mapping,
		pgoff_t index, void ***slotp)
{
	void *entry, **slot;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
					    &slot);
		if (!entry ||
		    WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)) ||
		    !slot_locked(mapping, slot)) {
			if (slotp)
				*slotp = slot;
			return entry;
		}

		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mapping->tree_lock);
		schedule();
		finish_wait(wq, &ewait.wait);
		spin_lock_irq(&mapping->tree_lock);
	}
}

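/*
 * Clear the lock bit of the entry at @index and wake up one waiter.  The
 * caller must hold the entry lock but not mapping->tree_lock.
 */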
static void dax_unlock_mapping_entry(struct address_space *mapping,
				     pgoff_t index)
{
	void *entry, **slot;

	spin_lock_irq(&mapping->tree_lock);
	entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
			 !slot_locked(mapping, slot))) {
		spin_unlock_irq(&mapping->tree_lock);
		return;
	}
	unlock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

static void put_locked_mapping_entry(struct address_space *mapping,
		pgoff_t index)
{
	dax_unlock_mapping_entry(mapping, index);
}

/*
 * Called when we are done with a radix tree entry we looked up via
 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 */
static void put_unlocked_mapping_entry(struct address_space *mapping,
				       pgoff_t index, void *entry)
{
	if (!entry)
		return;

	/* We have to wake up the next waiter for the radix tree entry lock */
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

/*
 * Find the radix tree entry at the given index.  If it points to an
 * exceptional entry, return it with the radix tree entry locked.  If the
 * radix tree doesn't contain the given index, create an empty exceptional
 * entry for the index and return with it locked.
 *
 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return an error.  This error will
 * happen if there are any 4k entries within the 2MiB range that we are
 * requesting.
 *
 * We always favor 4k entries over 2MiB entries.  There isn't a flow where we
 * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
 * insertion will fail if it finds any 4k entries already in the tree, and a
 * 4k insertion will cause an existing 2MiB entry to be unmapped and
 * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
 * well as 2MiB empty entries.
 *
 * The exception to this downgrade path is for 2MiB DAX PMD entries that have
 * real storage backing them.  We will leave these real 2MiB DAX entries in
 * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_ALLOW_RETRY.  For
 * persistent memory the benefit is doubtful.  We can add that later if we can
 * show it helps.
 */
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
		unsigned long size_flag)
{
	bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
	void *entry, **slot;

restart:
	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);

	if (WARN_ON_ONCE(entry && !radix_tree_exceptional_entry(entry))) {
		entry = ERR_PTR(-EIO);
		goto out_unlock;
	}

	if (entry) {
		if (size_flag & RADIX_DAX_PMD) {
			if (dax_is_pte_entry(entry)) {
				put_unlocked_mapping_entry(mapping, index,
							   entry);
				entry = ERR_PTR(-EEXIST);
				goto out_unlock;
			}
		} else { /* trying to grab a PTE entry */
			if (dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	/* No entry for given index? Make sure radix tree is big enough. */
	if (!entry || pmd_downgrade) {
		int err;

		if (pmd_downgrade) {
			/*
			 * Make sure 'entry' remains valid while we drop
			 * mapping->tree_lock.
			 */
			entry = lock_slot(mapping, slot);
		}

		spin_unlock_irq(&mapping->tree_lock);
		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (pmd_downgrade && dax_is_zero_entry(entry))
			unmap_mapping_range(mapping,
				(index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);

		err = radix_tree_preload(
				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
		if (err) {
			if (pmd_downgrade)
				put_locked_mapping_entry(mapping, index);
			return ERR_PTR(err);
		}
		spin_lock_irq(&mapping->tree_lock);

		if (!entry) {
			/*
			 * We needed to drop the page_tree lock while calling
			 * radix_tree_preload() and we didn't have an entry to
			 * lock.  See if another thread inserted an entry at
			 * our index during this time.
			 */
			entry = __radix_tree_lookup(&mapping->page_tree, index,
						    NULL, &slot);
			if (entry) {
				radix_tree_preload_end();
				spin_unlock_irq(&mapping->tree_lock);
				goto restart;
			}
		}

		if (pmd_downgrade) {
			radix_tree_delete(&mapping->page_tree, index);
			mapping->nrexceptional--;
			dax_wake_mapping_entry_waiter(mapping, index, entry,
						      true);
		}

		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);

		err = __radix_tree_insert(&mapping->page_tree, index,
					  dax_radix_order(entry), entry);
		radix_tree_preload_end();
		if (err) {
			spin_unlock_irq(&mapping->tree_lock);
			/*
			 * Our insertion of a DAX entry failed, most likely
			 * because we were inserting a PMD entry and it
			 * collided with a PTE sized entry at a different
			 * index in the PMD range.  We haven't inserted
			 * anything into the radix tree and have no waiters to
			 * wake.
			 */
			return ERR_PTR(err);
		}
		/* Good, we have inserted empty locked entry into the tree. */
		mapping->nrexceptional++;
		spin_unlock_irq(&mapping->tree_lock);
		return entry;
	}
	entry = lock_slot(mapping, slot);
out_unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return entry;
}

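/*
 * Delete the DAX radix tree entry at @index.  When @trunc is false the
 * entry is left alone if it is dirty or queued for writeback, so that
 * data is not dropped before it has been flushed to media.
 */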
static int __dax_invalidate_mapping_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	int ret = 0;
	void *entry;
	struct radix_tree_root *page_tree = &mapping->page_tree;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	if (!entry || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)))
		goto out;
	if (!trunc &&
	    (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
	     radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
		goto out;
	radix_tree_delete(page_tree, index);
	mapping->nrexceptional--;
	ret = 1;
out:
	put_unlocked_mapping_entry(mapping, index, entry);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}

/*
 * Delete the exceptional DAX entry at @index from @mapping.  Wait for the
 * radix tree entry to get unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_mapping_entry(mapping, index, true);

	/*
	 * This gets called from the truncate / punch-hole path.  As such, the
	 * caller must hold locks protecting against concurrent modifications
	 * of the radix tree (usually fs-private i_mmap_sem for writing).
	 * Since the caller has seen an exceptional entry for this index, we
	 * better find it at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate the exceptional DAX entry at @index if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_mapping_entry(mapping, index, false);
}

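/*
 * Copy @size bytes of data from the DAX device at @sector into the page
 * @to, used by the copy-on-write fault path for private mappings.
 */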
static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
			 sector_t sector, size_t size, struct page *to,
			 unsigned long vaddr)
{
	void *vto, *kaddr;
	pgoff_t pgoff;
	pfn_t pfn;
	long rc;
	int id;

	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_mapping_entry(struct address_space *mapping,
				      struct vm_fault *vmf,
				      void *entry, sector_t sector,
				      unsigned long flags)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	void *new_entry;
	pgoff_t index = vmf->pgoff;

	if (vmf->flags & FAULT_FLAG_WRITE)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) {
		/* we are replacing a zero page with block mapping */
		if (dax_is_pmd_entry(entry))
			unmap_mapping_range(mapping,
					(vmf->pgoff << PAGE_SHIFT) & PMD_MASK,
					PMD_SIZE, 0);
		else /* pte entry */
			unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
					PAGE_SIZE, 0);
	}

	spin_lock_irq(&mapping->tree_lock);
	new_entry = dax_radix_locked_entry(sector, flags);

	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		/*
		 * Only swap our new entry into the radix tree if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the tree, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		struct radix_tree_node *node;
		void **slot;
		void *ret;

		ret = __radix_tree_lookup(page_tree, index, &node, &slot);
		WARN_ON_ONCE(ret != entry);
		__radix_tree_replace(page_tree, node, slot,
				     new_entry, NULL);
		entry = new_entry;
	}

	if (vmf->flags & FAULT_FLAG_WRITE)
		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);

	spin_unlock_irq(&mapping->tree_lock);
	return entry;
}

static inline unsigned long
pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}

/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_mapping_entry_mkclean(struct address_space *mapping,
				      pgoff_t index, unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		unsigned long address, start, end;

		cond_resched();

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);

		/*
		 * Note that because we provide start/end to follow_pte_pmd()
		 * it will call mmu_notifier_invalidate_range_start() on our
		 * behalf before taking any lock.
		 */
		if (follow_pte_pmd(vma->vm_mm, address, &start, &end,
				   &ptep, &pmdp, &ptl))
			continue;

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection not changing it to point
		 * to a new page.
		 *
		 * See Documentation/vm/mmu_notifier.txt
		 */
		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_huge_clear_flush(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
unlock_pmd:
			spin_unlock(ptl);
#endif
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
	}
	i_mmap_unlock_read(mapping);
}

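/*
 * Flush one dirty entry to media: write-protect all userspace mappings
 * of the range so that further writes re-fault, flush the data out of
 * the CPU caches, and only then clear the radix tree dirty tag.
 */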
static int dax_writeback_one(struct block_device *bdev,
		struct dax_device *dax_dev, struct address_space *mapping,
		pgoff_t index, void *entry)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	void *entry2, **slot, *kaddr;
	long ret = 0, id;
	sector_t sector;
	pgoff_t pgoff;
	size_t size;
	pfn_t pfn;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!radix_tree_exceptional_entry(entry)))
		return -EIO;

	spin_lock_irq(&mapping->tree_lock);
	entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
	/* Entry got punched out / reallocated? */
	if (!entry2 || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry2)))
		goto put_unlocked;
	/*
	 * Entry got reallocated elsewhere? No need to writeback.  We have to
	 * compare sectors as we must not bail out due to difference in lockbit
	 * or entry type.
	 */
	if (dax_radix_sector(entry2) != dax_radix_sector(entry))
		goto put_unlocked;
	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
				dax_is_zero_entry(entry))) {
		ret = -EIO;
		goto put_unlocked;
	}

	/* Another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto put_unlocked;
	/* Lock the entry to serialize with page faults */
	entry = lock_slot(mapping, slot);
	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches.  This is achieved as the calls will look
	 * at the entry only under tree_lock and once they do that they will
	 * see the entry locked and wait for it to unlock.
	 */
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
	spin_unlock_irq(&mapping->tree_lock);

	/*
	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we are given will be aligned to
	 * the start index of the PMD, as will the sector we pull from
	 * 'entry'.  This allows us to flush for PMD_SIZE and not have to
	 * worry about partial PMD writebacks.
	 */
	sector = dax_radix_sector(entry);
	size = PAGE_SIZE << dax_radix_order(entry);

	id = dax_read_lock();
	ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (ret)
		goto dax_unlock;

	/*
	 * dax_direct_access() may sleep, so cannot hold tree_lock over
	 * its invocation.
	 */
	ret = dax_direct_access(dax_dev, pgoff, size / PAGE_SIZE, &kaddr, &pfn);
	if (ret < 0)
		goto dax_unlock;

	if (WARN_ON_ONCE(ret < size / PAGE_SIZE)) {
		ret = -EIO;
		goto dax_unlock;
	}

	dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(pfn));
	dax_flush(dax_dev, kaddr, size);
	/*
	 * After we have flushed the cache, we can clear the dirty tag.  There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	spin_lock_irq(&mapping->tree_lock);
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
	spin_unlock_irq(&mapping->tree_lock);
	trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT);
dax_unlock:
	dax_read_unlock(id);
	put_locked_mapping_entry(mapping, index);
	return ret;

put_unlocked:
	put_unlocked_mapping_entry(mapping, index, entry2);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end].  This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	pgoff_t start_index, end_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct dax_device *dax_dev;
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev)
		return -EIO;

	start_index = wbc->range_start >> PAGE_SHIFT;
	end_index = wbc->range_end >> PAGE_SHIFT;

	trace_dax_writeback_range(inode, start_index, end_index);

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(bdev, dax_dev, mapping,
					indices[i], pvec.pages[i]);
			if (ret < 0) {
				mapping_set_error(mapping, ret);
				goto out;
			}
		}
		start_index = indices[pvec.nr - 1] + 1;
	}
out:
	put_dax(dax_dev);
	trace_dax_writeback_range_done(inode, start_index, end_index);
	return (ret < 0 ? ret : 0);
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

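/*
 * Look up the pfn backing @sector, record the block mapping in the radix
 * tree and install a PTE for it, write-enabled for write faults.
 */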
static int dax_insert_mapping(struct address_space *mapping,
		struct block_device *bdev, struct dax_device *dax_dev,
		sector_t sector, size_t size, void *entry,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long vaddr = vmf->address;
	void *ret, *kaddr;
	pgoff_t pgoff;
	int id, rc;
	pfn_t pfn;

	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	dax_read_unlock(id);

	ret = dax_insert_mapping_entry(mapping, vmf, entry, sector, 0);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	trace_dax_insert_mapping(mapping->host, vmf, ret);
	if (vmf->flags & FAULT_FLAG_WRITE)
		return vm_insert_mixed_mkwrite(vma, vaddr, pfn);
	else
		return vm_insert_mixed(vma, vaddr, pfn);
}

/*
 * The user has performed a load from a hole in the file.  Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
 */
static int dax_load_hole(struct address_space *mapping, void *entry,
			 struct vm_fault *vmf)
{
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	int ret = VM_FAULT_NOPAGE;
	struct page *zero_page;
	void *entry2;

	zero_page = ZERO_PAGE(0);
	if (unlikely(!zero_page)) {
		ret = VM_FAULT_OOM;
		goto out;
	}

	entry2 = dax_insert_mapping_entry(mapping, vmf, entry, 0,
					  RADIX_DAX_ZERO_PAGE);
	if (IS_ERR(entry2)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	vm_insert_mixed(vmf->vma, vaddr, page_to_pfn_t(zero_page));
out:
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}

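/*
 * __dax_zero_page_range() zeroes the given sub-page range.  When the
 * range is aligned to the device's logical block size the block layer
 * can do the zeroing via blkdev_issue_zeroout(); otherwise we memset()
 * through the direct mapping and flush the result to media.
 */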
static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}

int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int size)
{
	if (dax_range_is_aligned(bdev, offset, size)) {
		sector_t start_sector = sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				size >> 9, GFP_NOFS, 0);
	} else {
		pgoff_t pgoff;
		long rc, id;
		void *kaddr;
		pfn_t pfn;

		rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
		if (rc)
			return rc;

		id = dax_read_lock();
		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr,
				&pfn);
		if (rc < 0) {
			dax_read_unlock(id);
			return rc;
		}
		memset(kaddr + offset, 0, size);
		dax_flush(dax_dev, kaddr + offset, size);
		dax_read_unlock(id);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);

static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
}

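/*
 * The actor for dax_iomap_rw(): copy data between the iov_iter and the
 * direct mapping of a single iomap extent, zeroing for reads over holes
 * and unwritten extents.
 */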
static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct block_device *bdev = iomap->bdev;
	struct dax_device *dax_dev = iomap->dax_dev;
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;
	int id;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/*
	 * Write can allocate block for an area which has a hole page mapped
	 * into page tables.  We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
	 */
	if (iomap->flags & IOMAP_F_NEW) {
		invalidate_inode_pages2_range(inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	id = dax_read_lock();
	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		const size_t size = ALIGN(length + offset, PAGE_SIZE);
		const sector_t sector = dax_iomap_sector(iomap, pos);
		ssize_t map_len;
		pgoff_t pgoff;
		void *kaddr;
		pfn_t pfn;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
		if (ret)
			break;

		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
					    &kaddr, &pfn);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		map_len = PFN_PHYS(map_len);
		kaddr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		/*
		 * The userspace address for the memory copy has already been
		 * validated via access_ok() in either vfs_read() or
		 * vfs_write(), depending on which operation we are doing.
		 */
		if (iov_iter_rw(iter) == WRITE)
			map_len = dax_copy_from_iter(dax_dev, pgoff, kaddr,
						     map_len, iter);
		else
			map_len = copy_to_iter(kaddr, map_len, iter);
		if (map_len <= 0) {
			ret = map_len ? map_len : -EFAULT;
			break;
		}

		pos += map_len;
		length -= map_len;
		done += map_len;
	}
	dax_read_unlock(id);

	return done ? done : ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:	The control block for this I/O
 * @iter:	The addresses to do I/O from or to
 * @ops:	iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_exclusive(&inode->i_rwsem);
		flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&inode->i_rwsem);
	}

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);

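/*
 * Translate an error from the iomap or DAX helpers into a VM_FAULT_*
 * code: success becomes VM_FAULT_NOPAGE, -ENOMEM becomes VM_FAULT_OOM
 * and anything else is reported as VM_FAULT_SIGBUS.
 */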
static int dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}

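/*
 * Handle a PTE-sized fault: look up the filesystem block with
 * ->iomap_begin(), then install the PTE (or a read-only zero page for
 * reads from a hole) while holding the radix tree entry lock.
 */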
static int dax_iomap_pte_fault(struct vm_fault *vmf,
			       const struct iomap_ops *ops)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	sector_t sector;
	struct iomap iomap = { 0 };
	unsigned flags = IOMAP_FAULT;
	int error, major = 0;
	int vmf_ret = 0;
	void *entry;

	trace_dax_pte_fault(inode, vmf, vmf_ret);
	/*
	 * Check whether the offset isn't beyond the end of the file now.
	 * The caller is supposed to hold locks serializing us with truncate /
	 * punch hole so this is a reliable test.
	 */
	if (pos >= i_size_read(inode)) {
		vmf_ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
		flags |= IOMAP_WRITE;

	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
	if (IS_ERR(entry)) {
		vmf_ret = dax_fault_return(PTR_ERR(entry));
		goto out;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PMD fault that overlaps with
	 * the PTE we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
		vmf_ret = VM_FAULT_NOPAGE;
		goto unlock_entry;
	}

	/*
	 * Note that we don't bother to use iomap_apply here: DAX requires
	 * the filesystem block size to be equal to the page size, which means
	 * that we never have to deal with more than a single extent here.
	 */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
	if (error) {
		vmf_ret = dax_fault_return(error);
		goto unlock_entry;
	}
	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
		error = -EIO;	/* fs corruption? */
		goto error_finish_iomap;
	}

	sector = dax_iomap_sector(&iomap, pos);

	if (vmf->cow_page) {
		switch (iomap.type) {
		case IOMAP_HOLE:
		case IOMAP_UNWRITTEN:
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
			error = copy_user_dax(iomap.bdev, iomap.dax_dev,
					sector, PAGE_SIZE, vmf->cow_page,
					vaddr);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		}

		if (error)
			goto error_finish_iomap;

		__SetPageUptodate(vmf->cow_page);
		vmf_ret = finish_fault(vmf);
		if (!vmf_ret)
			vmf_ret = VM_FAULT_DONE_COW;
		goto finish_iomap;
	}

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomap.flags & IOMAP_F_NEW) {
			count_vm_event(PGMAJFAULT);
			count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
		}
		error = dax_insert_mapping(mapping, iomap.bdev, iomap.dax_dev,
				sector, PAGE_SIZE, entry, vmf->vma, vmf);
		/* -EBUSY is fine, somebody else faulted on the same PTE */
		if (error == -EBUSY)
			error = 0;
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (!(vmf->flags & FAULT_FLAG_WRITE)) {
			vmf_ret = dax_load_hole(mapping, entry, vmf);
			goto finish_iomap;
		}
		/*FALLTHRU*/
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

error_finish_iomap:
	vmf_ret = dax_fault_return(error) | major;
finish_iomap:
	if (ops->iomap_end) {
		int copied = PAGE_SIZE;

		if (vmf_ret & VM_FAULT_ERROR)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PTE we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
	}
unlock_entry:
	put_locked_mapping_entry(mapping, vmf->pgoff);
out:
	trace_dax_pte_fault_done(inode, vmf, vmf_ret);
	return vmf_ret;
}

#ifdef CONFIG_FS_DAX_PMD
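/*
 * Insert a PMD-sized mapping for a fully allocated, PMD-aligned block.
 * Falls back to PTEs when the pfn is not 2MiB-aligned or the device
 * cannot provide a devmap pfn for the whole range.
 */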
static int dax_pmd_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
		loff_t pos, void *entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	const sector_t sector = dax_iomap_sector(iomap, pos);
	struct dax_device *dax_dev = iomap->dax_dev;
	struct block_device *bdev = iomap->bdev;
	struct inode *inode = mapping->host;
	const size_t size = PMD_SIZE;
	void *ret = NULL, *kaddr;
	long length = 0;
	pgoff_t pgoff;
	pfn_t pfn = {};
	int id;

	if (bdev_dax_pgoff(bdev, sector, size, &pgoff) != 0)
		goto fallback;

	id = dax_read_lock();
	length = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr,
				   &pfn);
	if (length < 0)
		goto unlock_fallback;
	length = PFN_PHYS(length);

	if (length < size)
		goto unlock_fallback;
	if (pfn_t_to_pfn(pfn) & PG_PMD_COLOUR)
		goto unlock_fallback;
	if (!pfn_t_devmap(pfn))
		goto unlock_fallback;
	dax_read_unlock(id);

	ret = dax_insert_mapping_entry(mapping, vmf, entry, sector,
			RADIX_DAX_PMD);
	if (IS_ERR(ret))
		goto fallback;

	trace_dax_pmd_insert_mapping(inode, vmf, length, pfn, ret);
	return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
			pfn, vmf->flags & FAULT_FLAG_WRITE);

unlock_fallback:
	dax_read_unlock(id);
fallback:
	trace_dax_pmd_insert_mapping_fallback(inode, vmf, length, pfn, ret);
	return VM_FAULT_FALLBACK;
}

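/*
 * Handle a read fault from a hole with a PMD: map the shared huge zero
 * page read-only and track it with a RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE
 * radix tree entry.
 */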
static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
		void *entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct inode *inode = mapping->host;
	struct page *zero_page;
	void *ret = NULL;
	spinlock_t *ptl;
	pmd_t pmd_entry;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	ret = dax_insert_mapping_entry(mapping, vmf, entry, 0,
			RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE);
	if (IS_ERR(ret))
		goto fallback;

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		spin_unlock(ptl);
		goto fallback;
	}

	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
	return VM_FAULT_NOPAGE;

fallback:
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
	return VM_FAULT_FALLBACK;
}

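/*
 * Handle a PMD-sized fault: verify that the faulting range is suitably
 * aligned within both the VMA and the file, grab a PMD-sized radix tree
 * entry and install the mapping.  Anything that doesn't fit falls back
 * to PTE-sized faults via VM_FAULT_FALLBACK.
 */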
static int dax_iomap_pmd_fault(struct vm_fault *vmf,
			       const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
	struct inode *inode = mapping->host;
	int result = VM_FAULT_FALLBACK;
	struct iomap iomap = { 0 };
	pgoff_t max_pgoff, pgoff;
	void *entry;
	loff_t pos;
	int error;

	/*
	 * Check whether the offset isn't beyond the end of the file now.
	 * The caller is supposed to hold locks serializing us with truncate /
	 * punch hole so this is a reliable test.
	 */
	pgoff = linear_page_index(vma, pmd_addr);
	max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);

	/*
	 * Make sure that the faulting address's PMD offset (colour) matches
	 * the PMD offset from the start of the file.  This is necessary so
	 * that a PMD range in the page table overlaps exactly with a PMD
	 * range in the radix tree.
	 */
	if ((vmf->pgoff & PG_PMD_COLOUR) !=
	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
		goto fallback;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		goto fallback;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		goto fallback;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		goto fallback;

	if (pgoff > max_pgoff) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}

	/* If the PMD would extend beyond the file size */
	if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
		goto fallback;

	/*
	 * grab_mapping_entry() will make sure we get a 2MiB empty entry, a
	 * 2MiB zero page entry or a DAX PMD.  If it can't (because a 4k page
	 * is already in the tree, for instance), it will return -EEXIST and
	 * we just fall back to 4k entries.
	 */
	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
	if (IS_ERR(entry))
		goto fallback;

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PTE fault that overlaps with
	 * the PMD we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
	    !pmd_devmap(*vmf->pmd)) {
		result = 0;
		goto unlock_entry;
	}

	/*
	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
	 * setting up a mapping, so really we're using iomap_begin() as a way
	 * to look up our filesystem block.
	 */
	pos = (loff_t)pgoff << PAGE_SHIFT;
	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
	if (error)
		goto unlock_entry;

	if (iomap.offset + iomap.length < pos + PMD_SIZE)
		goto finish_iomap;

	switch (iomap.type) {
	case IOMAP_MAPPED:
		result = dax_pmd_insert_mapping(vmf, &iomap, pos, entry);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(write))
			break;
		result = dax_pmd_load_hole(vmf, &iomap, entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

finish_iomap:
	if (ops->iomap_end) {
		int copied = PMD_SIZE;

		if (result == VM_FAULT_FALLBACK)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PMD we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
				&iomap);
	}
unlock_entry:
	put_locked_mapping_entry(mapping, pgoff);
fallback:
	if (result == VM_FAULT_FALLBACK) {
		split_huge_pmd(vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
	return result;
}
#else
static int dax_iomap_pmd_fault(struct vm_fault *vmf,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */

/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vmf:	The description of the fault
 * @pe_size:	Size of the page to fault in
 * @ops:	iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in
 * their fault handler for DAX files.  dax_iomap_fault() assumes the caller
 * has done all the necessary locking for page fault to proceed
 * successfully.
 */
int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    const struct iomap_ops *ops)
{
	switch (pe_size) {
	case PE_SIZE_PTE:
		return dax_iomap_pte_fault(vmf, ops);
	case PE_SIZE_PMD:
		return dax_iomap_pmd_fault(vmf, ops);
	default:
		return VM_FAULT_FALLBACK;
	}
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);