/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

/* The 'colour' (ie low bits) within a PMD of a page offset. */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

/*
 * We use lowest available bit in exceptional entry for locking, one bit for
 * the entry size (PMD) and two more to tell us if the entry is a zero page or
 * an empty entry that is just used for locking.  In total four special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
 */
#define RADIX_DAX_SHIFT		(RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
#define RADIX_DAX_ENTRY_LOCK	(1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
#define RADIX_DAX_PMD		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
#define RADIX_DAX_ZERO_PAGE	(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
#define RADIX_DAX_EMPTY		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))

static unsigned long dax_radix_pfn(void *entry)
{
	return (unsigned long)entry >> RADIX_DAX_SHIFT;
}

static void *dax_radix_locked_entry(unsigned long pfn, unsigned long flags)
{
	return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
			(pfn << RADIX_DAX_SHIFT) | RADIX_DAX_ENTRY_LOCK);
}

static unsigned int dax_radix_order(void *entry)
{
	if ((unsigned long)entry & RADIX_DAX_PMD)
		return PMD_SHIFT - PAGE_SHIFT;
	return 0;
}

static int dax_is_pmd_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_PMD;
}

static int dax_is_pte_entry(void *entry)
{
	return !((unsigned long)entry & RADIX_DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_EMPTY;
}

/*
 * DAX radix tree locking
 */
struct exceptional_entry_key {
	struct address_space *mapping;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};

static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
		pgoff_t index, void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~PG_PMD_COLOUR;

	key->mapping = mapping;
	key->entry_start = index;

	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}

static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mode,
				       int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->mapping != ewait->key.mapping ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * @entry may no longer be the entry at the index in the mapping.
 * The important information it's conveying is whether the entry at
 * this index used to be a PMD entry.
 */
static void dax_wake_mapping_entry_waiter(struct address_space *mapping,
		pgoff_t index, void *entry, bool wake_all)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(mapping, index, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under the i_pages lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}

/*
 * Check whether the given slot is locked.  Must be called with the i_pages
 * lock held.
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
	return entry & RADIX_DAX_ENTRY_LOCK;
}

/*
 * Mark the given slot as locked.  Must be called with the i_pages lock held.
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);

	entry |= RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Mark the given slot as unlocked.  Must be called with the i_pages lock held.
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);

	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Lookup entry in radix tree, wait for it to become unlocked if it is
 * exceptional entry and return it. The caller must call
 * put_unlocked_mapping_entry() when he decided not to lock the entry or
 * put_locked_mapping_entry() when he locked the entry and now wants to
 * unlock it.
 *
 * Must be called with the i_pages lock held.
 */
static void *get_unlocked_mapping_entry(struct address_space *mapping,
		pgoff_t index, void ***slotp)
{
	void *entry, **slot;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = __radix_tree_lookup(&mapping->i_pages, index, NULL,
					    &slot);
		if (!entry ||
		    WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)) ||
		    !slot_locked(mapping, slot)) {
			if (slotp)
				*slotp = slot;
			return entry;
		}

		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		xa_unlock_irq(&mapping->i_pages);
		schedule();
		finish_wait(wq, &ewait.wait);
		xa_lock_irq(&mapping->i_pages);
	}
}

static void dax_unlock_mapping_entry(struct address_space *mapping,
				     pgoff_t index)
{
	void *entry, **slot;

	xa_lock_irq(&mapping->i_pages);
	entry = __radix_tree_lookup(&mapping->i_pages, index, NULL, &slot);
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
			 !slot_locked(mapping, slot))) {
		xa_unlock_irq(&mapping->i_pages);
		return;
	}
	unlock_slot(mapping, slot);
	xa_unlock_irq(&mapping->i_pages);
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

static void put_locked_mapping_entry(struct address_space *mapping,
		pgoff_t index)
{
	dax_unlock_mapping_entry(mapping, index);
}

/*
 * Called when we are done with radix tree entry we looked up via
 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 */
static void put_unlocked_mapping_entry(struct address_space *mapping,
				       pgoff_t index, void *entry)
{
	if (!entry)
		return;

	/* We have to wake up next waiter for the radix tree entry lock */
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

static unsigned long dax_entry_size(void *entry)
{
	if (dax_is_zero_entry(entry))
		return 0;
	else if (dax_is_empty_entry(entry))
		return 0;
	else if (dax_is_pmd_entry(entry))
		return PMD_SIZE;
	else
		return PAGE_SIZE;
}

static unsigned long dax_radix_end_pfn(void *entry)
{
	return dax_radix_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
}

/*
 * Iterate through all mapped pfns represented by an entry, i.e. skip
 * 'empty' and 'zero' entries.
 */
#define for_each_mapped_pfn(entry, pfn) \
	for (pfn = dax_radix_pfn(entry); \
			pfn < dax_radix_end_pfn(entry); pfn++)

static void dax_associate_entry(void *entry, struct address_space *mapping)
{
	unsigned long pfn;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(page->mapping);
		page->mapping = mapping;
	}
}

static void dax_disassociate_entry(void *entry, struct address_space *mapping,
		bool trunc)
{
	unsigned long pfn;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
		WARN_ON_ONCE(page->mapping && page->mapping != mapping);
		page->mapping = NULL;
	}
}

static struct page *dax_busy_page(void *entry)
{
	unsigned long pfn;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		if (page_ref_count(page) > 1)
			return page;
	}
	return NULL;
}

/*
 * Find radix tree entry at given index. If it points to an exceptional entry,
 * return it with the radix tree entry locked. If the radix tree doesn't
 * contain given index, create an empty exceptional entry for the index and
 * return with it locked.
 *
 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return an error.  This error will
 * happen if there are any 4k entries within the 2MiB range that we are
 * requesting.
 *
 * We always favor 4k entries over 2MiB entries. There isn't a flow where we
 * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
 * insertion will fail if it finds any 4k entries already in the tree, and a
 * 4k insertion will cause an existing 2MiB entry to be unmapped and
 * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
 * well as 2MiB empty entries.
 *
 * The exception to this downgrade path is for 2MiB DAX PMD entries that have
 * real storage backing them.  We will leave these real 2MiB DAX entries in
 * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 */
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
		unsigned long size_flag)
{
	bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
	void *entry, **slot;

restart:
	xa_lock_irq(&mapping->i_pages);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);

	if (WARN_ON_ONCE(entry && !radix_tree_exceptional_entry(entry))) {
		entry = ERR_PTR(-EIO);
		goto out_unlock;
	}

	if (entry) {
		if (size_flag & RADIX_DAX_PMD) {
			if (dax_is_pte_entry(entry)) {
				put_unlocked_mapping_entry(mapping, index,
						entry);
				entry = ERR_PTR(-EEXIST);
				goto out_unlock;
			}
		} else { /* trying to grab a PTE entry */
			if (dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	/* No entry for given index? Make sure radix tree is big enough. */
	if (!entry || pmd_downgrade) {
		int err;

		if (pmd_downgrade) {
			/*
			 * Make sure 'entry' remains valid while we drop
			 * the i_pages lock.
			 */
			entry = lock_slot(mapping, slot);
		}

		xa_unlock_irq(&mapping->i_pages);
		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (pmd_downgrade && dax_is_zero_entry(entry))
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
							PG_PMD_NR, false);

		err = radix_tree_preload(
				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
		if (err) {
			if (pmd_downgrade)
				put_locked_mapping_entry(mapping, index);
			return ERR_PTR(err);
		}
		xa_lock_irq(&mapping->i_pages);

		if (!entry) {
			/*
			 * We needed to drop the i_pages lock while calling
			 * radix_tree_preload() and we didn't have an entry to
			 * lock.  See if another thread inserted an entry at
			 * our index during this time.
			 */
			entry = __radix_tree_lookup(&mapping->i_pages, index,
					NULL, &slot);
			if (entry) {
				radix_tree_preload_end();
				xa_unlock_irq(&mapping->i_pages);
				goto restart;
			}
		}

		if (pmd_downgrade) {
			dax_disassociate_entry(entry, mapping, false);
			radix_tree_delete(&mapping->i_pages, index);
			mapping->nrexceptional--;
			dax_wake_mapping_entry_waiter(mapping, index, entry,
					true);
		}

		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);

		err = __radix_tree_insert(&mapping->i_pages, index,
				dax_radix_order(entry), entry);
		radix_tree_preload_end();
		if (err) {
			xa_unlock_irq(&mapping->i_pages);
			/*
			 * Our insertion of a DAX entry failed, most likely
			 * because we were inserting a PMD entry and it
			 * collided with a PTE sized entry at a different
			 * index in the PMD range.  We haven't inserted
			 * anything into the radix tree and have no waiters to
			 * wake.
			 */
			return ERR_PTR(err);
		}
		/* Good, we have inserted empty locked entry into the tree. */
		mapping->nrexceptional++;
		xa_unlock_irq(&mapping->i_pages);
		return entry;
	}
	entry = lock_slot(mapping, slot);
out_unlock:
	xa_unlock_irq(&mapping->i_pages);
	return entry;
}

/**
 * dax_layout_busy_page - find first pinned page in @mapping
 * @mapping: address space to scan for a page with ref count > 1
 *
 * DAX requires ZONE_DEVICE mapped pages. These pages are never
 * 'onlined' to the page allocator so they are considered idle when
 * page->count == 1. A filesystem uses this interface to determine if
 * any page in the mapping is busy, i.e. for DMA, or other
 * get_user_pages() usages.
 *
 * It is expected that the filesystem is holding locks to block the
 * establishment of new mappings in this address_space. I.e. it expects
 * to be able to run unmap_mapping_range() and subsequently not race
 * mapping_mapped() becoming true.
 */
struct page *dax_layout_busy_page(struct address_space *mapping)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct page *page = NULL;
	struct pagevec pvec;
	pgoff_t index, end;
	unsigned i;

	/*
	 * In the 'limited' case get_user_pages() for dax is disabled.
	 */
	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return NULL;

	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
		return NULL;

	pagevec_init(&pvec);
	index = 0;
	end = -1;

	/*
	 * If we race get_user_pages_fast() here either we'll see the
	 * elevated page count in the pagevec_lookup and wait, or
	 * get_user_pages_fast() will see that the page it took a reference
	 * against is no longer mapped in the page tables and bail to the
	 * get_user_pages() slow path.  The slow path is protected by
	 * pte_lock() and pmd_lock(). New references are not taken without
	 * holding those locks, and unmap_mapping_range() will not zero the
	 * pte or pmd without holding the respective lock, so we are
	 * guaranteed to either see new references or prevent new
	 * references from being established.
	 */
	unmap_mapping_range(mapping, 0, 0, 1);

	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE),
				indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *pvec_ent = pvec.pages[i];
			void *entry;

			index = indices[i];
			if (index >= end)
				break;

			if (!radix_tree_exceptional_entry(pvec_ent))
				continue;

			xa_lock_irq(&mapping->i_pages);
			entry = get_unlocked_mapping_entry(mapping, index, NULL);
			if (entry)
				page = dax_busy_page(entry);
			put_unlocked_mapping_entry(mapping, index, entry);
			xa_unlock_irq(&mapping->i_pages);
			if (page)
				break;
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		index++;

		if (page)
			break;
	}
	return page;
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page);

static int __dax_invalidate_mapping_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	int ret = 0;
	void *entry;
	struct radix_tree_root *pages = &mapping->i_pages;

	xa_lock_irq(pages);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	if (!entry || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)))
		goto out;
	if (!trunc &&
	    (radix_tree_tag_get(pages, index, PAGECACHE_TAG_DIRTY) ||
	     radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE)))
		goto out;
	dax_disassociate_entry(entry, mapping, trunc);
	radix_tree_delete(pages, index);
	mapping->nrexceptional--;
	ret = 1;
out:
	put_unlocked_mapping_entry(mapping, index, entry);
	xa_unlock_irq(pages);
	return ret;
}
/*
 * Delete exceptional DAX entry at @index from @mapping.  Wait for radix tree
 * entry to get unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_mapping_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * radix tree (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen exceptional entry for this index, we better find it
	 * at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate exceptional DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_mapping_entry(mapping, index, false);
}

static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
		sector_t sector, size_t size, struct page *to,
		unsigned long vaddr)
{
	void *vto, *kaddr;
	pgoff_t pgoff;
	pfn_t pfn;
	long rc;
	int id;

	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_mapping_entry(struct address_space *mapping,
				      struct vm_fault *vmf,
				      void *entry, pfn_t pfn_t,
				      unsigned long flags, bool dirty)
{
	struct radix_tree_root *pages = &mapping->i_pages;
	unsigned long pfn = pfn_t_to_pfn(pfn_t);
	pgoff_t index = vmf->pgoff;
	void *new_entry;

	if (dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) {
		/* we are replacing a zero page with block mapping */
		if (dax_is_pmd_entry(entry))
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
							PG_PMD_NR, false);
		else /* pte entry */
			unmap_mapping_pages(mapping, vmf->pgoff, 1, false);
	}

	xa_lock_irq(pages);
	new_entry = dax_radix_locked_entry(pfn, flags);
	if (dax_entry_size(entry) != dax_entry_size(new_entry)) {
		dax_disassociate_entry(entry, mapping, false);
		dax_associate_entry(new_entry, mapping);
	}

	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		/*
		 * Only swap our new entry into the radix tree if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the tree, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		struct radix_tree_node *node;
		void **slot;
		void *ret;

		ret = __radix_tree_lookup(pages, index, &node, &slot);
		WARN_ON_ONCE(ret != entry);
		__radix_tree_replace(pages, node, slot,
				     new_entry, NULL);
		entry = new_entry;
	}

	if (dirty)
		radix_tree_tag_set(pages, index, PAGECACHE_TAG_DIRTY);

	xa_unlock_irq(pages);
	return entry;
}

static inline unsigned long
pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}

/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_mapping_entry_mkclean(struct address_space *mapping,
				      pgoff_t index, unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		unsigned long address, start, end;

		cond_resched();

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);

		/*
		 * Note because we provide start/end to follow_pte_pmd it will
		 * call mmu_notifier_invalidate_range_start() on our behalf
		 * before taking any lock.
		 */
		if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
			continue;

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection not changing it to point
		 * to a new page.
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_huge_clear_flush(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
unlock_pmd:
#endif
			spin_unlock(ptl);
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
	}
	i_mmap_unlock_read(mapping);
}

static int dax_writeback_one(struct dax_device *dax_dev,
		struct address_space *mapping, pgoff_t index, void *entry)
{
	struct radix_tree_root *pages = &mapping->i_pages;
	void *entry2, **slot;
	unsigned long pfn;
	long ret = 0;
	size_t size;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!radix_tree_exceptional_entry(entry)))
		return -EIO;

	xa_lock_irq(pages);
	entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
	/* Entry got punched out / reallocated? */
	if (!entry2 || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry2)))
		goto put_unlocked;
	/*
	 * Entry got reallocated elsewhere? No need to writeback. We have to
	 * compare pfns as we must not bail out due to difference in lockbit
	 * or entry type.
	 */
	if (dax_radix_pfn(entry2) != dax_radix_pfn(entry))
		goto put_unlocked;
	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
				dax_is_zero_entry(entry))) {
		ret = -EIO;
		goto put_unlocked;
	}

	/* Another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE))
		goto put_unlocked;
	/* Lock the entry to serialize with page faults */
	entry = lock_slot(mapping, slot);
	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under the i_pages lock and once they do that
	 * they will see the entry locked and wait for it to unlock.
	 */
	radix_tree_tag_clear(pages, index, PAGECACHE_TAG_TOWRITE);
	xa_unlock_irq(pages);

	/*
	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we are given will be aligned to
	 * the start index of the PMD, as will the pfn we pull from 'entry'.
	 * This allows us to flush for PMD_SIZE and not have to worry about
	 * partial PMD writebacks.
	 */
	pfn = dax_radix_pfn(entry);
	size = PAGE_SIZE << dax_radix_order(entry);

	dax_mapping_entry_mkclean(mapping, index, pfn);
	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), size);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	xa_lock_irq(pages);
	radix_tree_tag_clear(pages, index, PAGECACHE_TAG_DIRTY);
	xa_unlock_irq(pages);
	trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT);
	put_locked_mapping_entry(mapping, index);
	return ret;

put_unlocked:
	put_unlocked_mapping_entry(mapping, index, entry2);
	xa_unlock_irq(pages);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	pgoff_t start_index, end_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct dax_device *dax_dev;
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev)
		return -EIO;

	start_index = wbc->range_start >> PAGE_SHIFT;
	end_index = wbc->range_end >> PAGE_SHIFT;

	trace_dax_writeback_range(inode, start_index, end_index);

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(dax_dev, mapping, indices[i],
					pvec.pages[i]);
			if (ret < 0) {
				mapping_set_error(mapping, ret);
				goto out;
			}
		}
		start_index = indices[pvec.nr - 1] + 1;
	}
out:
	put_dax(dax_dev);
	trace_dax_writeback_range_done(inode, start_index, end_index);
	return (ret < 0 ? ret : 0);
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
}

static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
			 pfn_t *pfnp)
{
	const sector_t sector = dax_iomap_sector(iomap, pos);
	pgoff_t pgoff;
	void *kaddr;
	int id, rc;
	long length;

	rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
	if (rc)
		return rc;
	id = dax_read_lock();
	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
				   &kaddr, pfnp);
	if (length < 0) {
		rc = length;
		goto out;
	}
	rc = -EINVAL;
	if (PFN_PHYS(length) < size)
		goto out;
	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
		goto out;
	/* For larger pages we need devmap */
	if (length > 1 && !pfn_t_devmap(*pfnp))
		goto out;
	rc = 0;
out:
	dax_read_unlock(id);
	return rc;
}

/*
 * The user has performed a load from a hole in the file.  Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
 */
static vm_fault_t dax_load_hole(struct address_space *mapping, void *entry,
			 struct vm_fault *vmf)
{
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	struct page *zero_page;
	pfn_t pfn;

	zero_page = ZERO_PAGE(0);
	if (unlikely(!zero_page)) {
		ret = VM_FAULT_OOM;
		goto out;
	}

	pfn = page_to_pfn_t(zero_page);
	dax_insert_mapping_entry(mapping, vmf, entry, pfn, RADIX_DAX_ZERO_PAGE,
			false);
	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
out:
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}

static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}

int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int size)
{
	if (dax_range_is_aligned(bdev, offset, size)) {
		sector_t start_sector = sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
					    size >> 9, GFP_NOFS, 0);
	} else {
		pgoff_t pgoff;
		long rc, id;
		void *kaddr;
		pfn_t pfn;

		rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
		if (rc)
			return rc;

		id = dax_read_lock();
		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr,
				&pfn);
		if (rc < 0) {
			dax_read_unlock(id);
			return rc;
		}
		memset(kaddr + offset, 0, size);
		dax_flush(dax_dev, kaddr + offset, size);
		dax_read_unlock(id);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);

static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct block_device *bdev = iomap->bdev;
	struct dax_device *dax_dev = iomap->dax_dev;
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;
	size_t xfer;
	int id;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/*
	 * Write can allocate block for an area which has a hole page mapped
	 * into page tables. We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
	 */
	if (iomap->flags & IOMAP_F_NEW) {
		invalidate_inode_pages2_range(inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	id = dax_read_lock();
	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		const size_t size = ALIGN(length + offset, PAGE_SIZE);
		const sector_t sector = dax_iomap_sector(iomap, pos);
		ssize_t map_len;
		pgoff_t pgoff;
		void *kaddr;
		pfn_t pfn;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
		if (ret)
			break;

		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
				&kaddr, &pfn);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		map_len = PFN_PHYS(map_len);
		kaddr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		/*
		 * The userspace address for the memory copy has already been
		 * validated via access_ok() in either vfs_read() or
		 * vfs_write(), depending on which operation we are doing.
		 */
		if (iov_iter_rw(iter) == WRITE)
			xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
					map_len, iter);
		else
			xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
					map_len, iter);

		pos += xfer;
		length -= xfer;
		done += xfer;

		if (xfer == 0)
			ret = -EFAULT;
		if (xfer < map_len)
			break;
	}
	dax_read_unlock(id);

	return done ? done : ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:	The control block for this I/O
 * @iter:	The addresses to do I/O from or to
 * @ops:	iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_exclusive(&inode->i_rwsem);
		flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&inode->i_rwsem);
	}

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);

static vm_fault_t dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}

/*
 * MAP_SYNC on a dax mapping guarantees dirty metadata is
 * flushed on write-faults (non-cow), but not read-faults.
 */
static bool dax_fault_is_synchronous(unsigned long flags,
		struct vm_area_struct *vma, struct iomap *iomap)
{
	return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
		&& (iomap->flags & IOMAP_F_DIRTY);
}

static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       int *iomap_errp, const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	struct iomap iomap = { 0 };
	unsigned flags = IOMAP_FAULT;
	int error, major = 0;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	vm_fault_t ret = 0;
	void *entry;
	pfn_t pfn;

	trace_dax_pte_fault(inode, vmf, ret);
	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	if (pos >= i_size_read(inode)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if (write && !vmf->cow_page)
		flags |= IOMAP_WRITE;

	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
	if (IS_ERR(entry)) {
		ret = dax_fault_return(PTR_ERR(entry));
		goto out;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PMD fault that overlaps with
	 * the PTE we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
		ret = VM_FAULT_NOPAGE;
		goto unlock_entry;
	}

	/*
	 * Note that we don't bother to use iomap_apply here: DAX requires
	 * the filesystem block size to be equal to the page size, which means
	 * that we never have to deal with more than a single extent here.
	 */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
	if (iomap_errp)
		*iomap_errp = error;
	if (error) {
		ret = dax_fault_return(error);
		goto unlock_entry;
	}
	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
		error = -EIO;	/* fs corruption? */
		goto error_finish_iomap;
	}

	if (vmf->cow_page) {
		sector_t sector = dax_iomap_sector(&iomap, pos);

		switch (iomap.type) {
		case IOMAP_HOLE:
		case IOMAP_UNWRITTEN:
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
			error = copy_user_dax(iomap.bdev, iomap.dax_dev,
					sector, PAGE_SIZE, vmf->cow_page, vaddr);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		}

		if (error)
			goto error_finish_iomap;

		__SetPageUptodate(vmf->cow_page);
		ret = finish_fault(vmf);
		if (!ret)
			ret = VM_FAULT_DONE_COW;
		goto finish_iomap;
	}

	sync = dax_fault_is_synchronous(flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomap.flags & IOMAP_F_NEW) {
			count_vm_event(PGMAJFAULT);
			count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
		}
		error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
		if (error < 0)
			goto error_finish_iomap;

		entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
						 0, write && !sync);

		/*
		 * If we are doing synchronous page fault and inode needs fsync,
		 * we can insert PTE into page tables only after that happens.
		 * Skip insertion for now and return the pfn so that caller can
		 * insert it after fsync is done.
		 */
		if (sync) {
			if (WARN_ON_ONCE(!pfnp)) {
				error = -EIO;
				goto error_finish_iomap;
			}
			*pfnp = pfn;
			ret = VM_FAULT_NEEDDSYNC | major;
			goto finish_iomap;
		}
		trace_dax_insert_mapping(inode, vmf, entry);
		if (write)
			ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
		else
			ret = vmf_insert_mixed(vma, vaddr, pfn);

		goto finish_iomap;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (!write) {
			ret = dax_load_hole(mapping, entry, vmf);
			goto finish_iomap;
		}
		/*FALLTHRU*/
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

error_finish_iomap:
	ret = dax_fault_return(error);
finish_iomap:
	if (ops->iomap_end) {
		int copied = PAGE_SIZE;

		if (ret & VM_FAULT_ERROR)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PTE we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
	}
unlock_entry:
	put_locked_mapping_entry(mapping, vmf->pgoff);
out:
	trace_dax_pte_fault_done(inode, vmf, ret);
	return ret | major;
}

#ifdef CONFIG_FS_DAX_PMD
static vm_fault_t dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
		void *entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct inode *inode = mapping->host;
	struct page *zero_page;
	void *ret = NULL;
	spinlock_t *ptl;
	pmd_t pmd_entry;
	pfn_t pfn;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	pfn = page_to_pfn_t(zero_page);
	ret = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
			RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE, false);

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		spin_unlock(ptl);
		goto fallback;
	}

	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
	return VM_FAULT_NOPAGE;

fallback:
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
	return VM_FAULT_FALLBACK;
}

static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
	struct inode *inode = mapping->host;
	vm_fault_t result = VM_FAULT_FALLBACK;
	struct iomap iomap = { 0 };
	pgoff_t max_pgoff, pgoff;
	void *entry;
	loff_t pos;
	int error;
	pfn_t pfn;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is
	 * supposed to hold locks serializing us with truncate / punch hole so
	 * this is a reliable test.
	 */
	pgoff = linear_page_index(vma, pmd_addr);
	max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);

	/*
	 * Make sure that the faulting address's PMD offset (color) matches
	 * the PMD offset from the start of the file.  This is necessary so
	 * that a PMD range in the page table overlaps exactly with a PMD
	 * range in the radix tree.
	 */
	if ((vmf->pgoff & PG_PMD_COLOUR) !=
	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
		goto fallback;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		goto fallback;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		goto fallback;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		goto fallback;

	if (pgoff >= max_pgoff) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}

	/* If the PMD would extend beyond the file size */
	if ((pgoff | PG_PMD_COLOUR) >= max_pgoff)
		goto fallback;

	/*
	 * grab_mapping_entry() will make sure we get a 2MiB empty entry, a
	 * 2MiB zero page entry or a DAX PMD.  If it can't (because a 4k page
	 * is already in the tree, for instance), it will return -EEXIST and
	 * we just fall back to 4k entries.
	 */
	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
	if (IS_ERR(entry))
		goto fallback;

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PTE fault that overlaps with
	 * the PMD we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
			!pmd_devmap(*vmf->pmd)) {
		result = 0;
		goto unlock_entry;
	}

	/*
	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
	 * setting up a mapping, so really we're using iomap_begin() as a way
	 * to look up our filesystem block.
	 */
	pos = (loff_t)pgoff << PAGE_SHIFT;
	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
	if (error)
		goto unlock_entry;

	if (iomap.offset + iomap.length < pos + PMD_SIZE)
		goto finish_iomap;

	sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
		if (error < 0)
			goto finish_iomap;

		entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
						RADIX_DAX_PMD, write && !sync);

		/*
		 * If we are doing synchronous page fault and inode needs fsync,
		 * we can insert PMD into page tables only after that happens.
		 * Skip insertion for now and return the pfn so that caller can
		 * insert it after fsync is done.
		 */
		if (sync) {
			if (WARN_ON_ONCE(!pfnp))
				goto finish_iomap;
			*pfnp = pfn;
			result = VM_FAULT_NEEDDSYNC;
			goto finish_iomap;
		}

		trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
		result = vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn,
					    write);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(write))
			break;
		result = dax_pmd_load_hole(vmf, &iomap, entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

finish_iomap:
	if (ops->iomap_end) {
		int copied = PMD_SIZE;

		if (result == VM_FAULT_FALLBACK)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PMD we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
				&iomap);
	}
unlock_entry:
	put_locked_mapping_entry(mapping, pgoff);
fallback:
	if (result == VM_FAULT_FALLBACK) {
		split_huge_pmd(vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
	return result;
}
#else
static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */

/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vmf: The description of the fault
 * @pe_size: Size of the page to fault in
 * @pfnp: PFN to insert for synchronous faults if fsync is required
 * @iomap_errp: Storage for detailed error code in case of error
 * @ops: Iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in
 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
 * has done all the necessary locking for page fault to proceed
 * successfully.
 */
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
{
	switch (pe_size) {
	case PE_SIZE_PTE:
		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
	case PE_SIZE_PMD:
		return dax_iomap_pmd_fault(vmf, pfnp, ops);
	default:
		return VM_FAULT_FALLBACK;
	}
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);

/**
 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
 * @vmf: The description of the fault
 * @pe_size: Size of entry to be inserted
 * @pfn: PFN to insert
 *
 * This function inserts a writeable PTE or PMD entry into the page tables
 * for an mmaped DAX file.  It also marks the corresponding radix tree entry
 * as dirty.
 */
static vm_fault_t dax_insert_pfn_mkwrite(struct vm_fault *vmf,
				  enum page_entry_size pe_size,
				  pfn_t pfn)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	void *entry, **slot;
	pgoff_t index = vmf->pgoff;
	vm_fault_t ret;

	xa_lock_irq(&mapping->i_pages);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);
	/* Did we race with someone splitting entry or so? */
	if (!entry ||
	    (pe_size == PE_SIZE_PTE && !dax_is_pte_entry(entry)) ||
	    (pe_size == PE_SIZE_PMD && !dax_is_pmd_entry(entry))) {
		put_unlocked_mapping_entry(mapping, index, entry);
		xa_unlock_irq(&mapping->i_pages);
		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
						      VM_FAULT_NOPAGE);
		return VM_FAULT_NOPAGE;
	}
	radix_tree_tag_set(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY);
	entry = lock_slot(mapping, slot);
	xa_unlock_irq(&mapping->i_pages);
	switch (pe_size) {
	case PE_SIZE_PTE:
		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
		break;
#ifdef CONFIG_FS_DAX_PMD
	case PE_SIZE_PMD:
		ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
			pfn, true);
		break;
#endif
	default:
		ret = VM_FAULT_FALLBACK;
	}
	put_locked_mapping_entry(mapping, index);
	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
	return ret;
}

/**
 * dax_finish_sync_fault - finish synchronous page fault
 * @vmf: The description of the fault
 * @pe_size: Size of entry to be inserted
 * @pfn: PFN to insert
 *
 * This function ensures that the file range touched by the page fault is
 * stored persistently on the media and handles inserting of appropriate page
 * table entry.
 */
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size, pfn_t pfn)
{
	int err;
	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
	size_t len = 0;

	if (pe_size == PE_SIZE_PTE)
		len = PAGE_SIZE;
	else if (pe_size == PE_SIZE_PMD)
		len = PMD_SIZE;
	else
		WARN_ON_ONCE(1);
	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
	if (err)
		return VM_FAULT_SIGBUS;
	return dax_insert_pfn_mkwrite(vmf, pe_size, pfn);
}
EXPORT_SYMBOL_GPL(dax_finish_sync_fault);