// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include <asm/pgalloc.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

static inline unsigned int pe_order(enum page_entry_size pe_size)
{
	if (pe_size == PE_SIZE_PTE)
		return PAGE_SHIFT - PAGE_SHIFT;
	if (pe_size == PE_SIZE_PMD)
		return PMD_SHIFT - PAGE_SHIFT;
	if (pe_size == PE_SIZE_PUD)
		return PUD_SHIFT - PAGE_SHIFT;
	return ~0;
}

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

/* The 'colour' (ie low bits) within a PMD of a page offset. */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)

/* The order of a PMD entry */
#define PMD_ORDER	(PMD_SHIFT - PAGE_SHIFT)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

/*
 * DAX pagecache entries use XArray value entries so they can't be mistaken
 * for pages.  We use one bit for locking, one bit for the entry size (PMD)
 * and two more to tell us if the entry is a zero page or an empty entry that
 * is just used for locking.  In total four special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
 */
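/*
 * Worked example (illustrative only): a locked PMD entry for pfn 0x1000
 * is built from the value (0x1000 << DAX_SHIFT) | DAX_PMD | DAX_LOCKED
 * == 0x10003, which xa_mk_value() then tags so it can never be confused
 * with a struct page pointer stored in the same XArray.
 */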
#define DAX_SHIFT	(4)
#define DAX_LOCKED	(1UL << 0)
#define DAX_PMD		(1UL << 1)
#define DAX_ZERO_PAGE	(1UL << 2)
#define DAX_EMPTY	(1UL << 3)

static unsigned long dax_to_pfn(void *entry)
{
	return xa_to_value(entry) >> DAX_SHIFT;
}

static void *dax_make_entry(pfn_t pfn, unsigned long flags)
{
	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
}

static bool dax_is_locked(void *entry)
{
	return xa_to_value(entry) & DAX_LOCKED;
}

static unsigned int dax_entry_order(void *entry)
{
	if (xa_to_value(entry) & DAX_PMD)
		return PMD_ORDER;
	return 0;
}

static unsigned long dax_is_pmd_entry(void *entry)
{
	return xa_to_value(entry) & DAX_PMD;
}

static bool dax_is_pte_entry(void *entry)
{
	return !(xa_to_value(entry) & DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return xa_to_value(entry) & DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
	return xa_to_value(entry) & DAX_EMPTY;
}

/*
 * DAX page cache entry locking
 */
struct exceptional_entry_key {
	struct xarray *xa;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};

static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
		void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;
	unsigned long index = xas->xa_index;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~PG_PMD_COLOUR;
	key->xa = xas->xa;
	key->entry_start = index;

	hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}

static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
		unsigned int mode, int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->xa != ewait->key.xa ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * @entry may no longer be the entry at the index in the mapping.
 * The important information it's conveying is whether the entry at
 * this index used to be a PMD entry.
 */
static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(xas, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under the i_pages lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}

/*
 * Look up entry in page cache, wait for it to become unlocked if it
 * is a DAX entry and return it.  The caller must subsequently call
 * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
 * if it did.
 *
 * Must be called with the i_pages lock held.
 */
static void *get_unlocked_entry(struct xa_state *xas)
{
	void *entry;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = xas_find_conflict(xas);
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)) ||
				!dax_is_locked(entry))
			return entry;

		wq = dax_entry_waitqueue(xas, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		xas_unlock_irq(xas);
		xas_reset(xas);
		schedule();
		finish_wait(wq, &ewait.wait);
		xas_lock_irq(xas);
	}
}

/*
 * The only thing keeping the address space around is the i_pages lock
 * (it's cycled in clear_inode() after removing the entries from i_pages)
 * After we call xas_unlock_irq(), we cannot touch xas->xa.
 */
static void wait_entry_unlocked(struct xa_state *xas, void *entry)
{
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
	/*
	 * Unlike get_unlocked_entry() there is no guarantee that this
	 * path ever successfully retrieves an unlocked entry before an
	 * inode dies. Perform a non-exclusive wait in case this path
	 * never successfully performs its own wake up.
	 */
	prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
	xas_unlock_irq(xas);
	schedule();
	finish_wait(wq, &ewait.wait);
}

static void put_unlocked_entry(struct xa_state *xas, void *entry)
{
	/* If we were the only waiter woken, wake the next one */
	if (entry)
		dax_wake_entry(xas, entry, false);
}

/*
 * We used the xa_state to get the entry, but then we locked the entry and
 * dropped the xa_lock, so we know the xa_state is stale and must be reset
 * before use.
 */
static void dax_unlock_entry(struct xa_state *xas, void *entry)
{
	void *old;

	BUG_ON(dax_is_locked(entry));
	xas_reset(xas);
	xas_lock_irq(xas);
	old = xas_store(xas, entry);
	xas_unlock_irq(xas);
	BUG_ON(!dax_is_locked(old));
	dax_wake_entry(xas, entry, false);
}

/*
 * Return: The entry stored at this location before it was locked.
 */
static void *dax_lock_entry(struct xa_state *xas, void *entry)
{
	unsigned long v = xa_to_value(entry);
	return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
}
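/*
 * Sketch of how these helpers compose (illustrative only; the real
 * callers below add error handling and entry revalidation):
 *
 *	xas_lock_irq(&xas);
 *	entry = get_unlocked_entry(&xas);	// sleeps while DAX_LOCKED is set
 *	dax_lock_entry(&xas, entry);		// sets DAX_LOCKED in place
 *	xas_unlock_irq(&xas);
 *	...	// other faults on this index now wait in the hash table
 *	dax_unlock_entry(&xas, entry);		// stores entry, wakes one waiter
 */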
static unsigned long dax_entry_size(void *entry)
{
	if (dax_is_zero_entry(entry))
		return 0;
	else if (dax_is_empty_entry(entry))
		return 0;
	else if (dax_is_pmd_entry(entry))
		return PMD_SIZE;
	else
		return PAGE_SIZE;
}

static unsigned long dax_end_pfn(void *entry)
{
	return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
}

/*
 * Iterate through all mapped pfns represented by an entry, i.e. skip
 * 'empty' and 'zero' entries.
 */
#define for_each_mapped_pfn(entry, pfn) \
	for (pfn = dax_to_pfn(entry); \
			pfn < dax_end_pfn(entry); pfn++)

/*
 * TODO: for reflink+dax we need a way to associate a single page with
 * multiple address_space instances at different linear_page_index()
 * offsets.
 */
static void dax_associate_entry(void *entry, struct address_space *mapping,
		struct vm_area_struct *vma, unsigned long address)
{
	unsigned long size = dax_entry_size(entry), pfn, index;
	int i = 0;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	index = linear_page_index(vma, address & ~(size - 1));
	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(page->mapping);
		page->mapping = mapping;
		page->index = index + i++;
	}
}

static void dax_disassociate_entry(void *entry, struct address_space *mapping,
		bool trunc)
{
	unsigned long pfn;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
		WARN_ON_ONCE(page->mapping && page->mapping != mapping);
		page->mapping = NULL;
		page->index = 0;
	}
}

static struct page *dax_busy_page(void *entry)
{
	unsigned long pfn;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		if (page_ref_count(page) > 1)
			return page;
	}
	return NULL;
}

/*
 * dax_lock_page - Lock the DAX entry corresponding to a page
 * @page: The page whose entry we want to lock
 *
 * Context: Process context.
 * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
 * not be locked.
 */
dax_entry_t dax_lock_page(struct page *page)
{
	XA_STATE(xas, NULL, 0);
	void *entry;

	/* Ensure page->mapping isn't freed while we look at it */
	rcu_read_lock();
	for (;;) {
		struct address_space *mapping = READ_ONCE(page->mapping);

		entry = NULL;
		if (!mapping || !dax_mapping(mapping))
			break;

		/*
		 * In the device-dax case there's no need to lock, a
		 * struct dev_pagemap pin is sufficient to keep the
		 * inode alive, and we assume we have dev_pagemap pin
		 * otherwise we would not have a valid pfn_to_page()
		 * translation.
		 */
		entry = (void *)~0UL;
		if (S_ISCHR(mapping->host->i_mode))
			break;

		xas.xa = &mapping->i_pages;
		xas_lock_irq(&xas);
		if (mapping != page->mapping) {
			xas_unlock_irq(&xas);
			continue;
		}
		xas_set(&xas, page->index);
		entry = xas_load(&xas);
		if (dax_is_locked(entry)) {
			rcu_read_unlock();
			wait_entry_unlocked(&xas, entry);
			rcu_read_lock();
			continue;
		}
		dax_lock_entry(&xas, entry);
		xas_unlock_irq(&xas);
		break;
	}
	rcu_read_unlock();
	return (dax_entry_t)entry;
}

void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
	struct address_space *mapping = page->mapping;
	XA_STATE(xas, &mapping->i_pages, page->index);

	if (S_ISCHR(mapping->host->i_mode))
		return;

	dax_unlock_entry(&xas, (void *)cookie);
}
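/*
 * Illustrative caller pattern (a sketch of how e.g. the memory-failure
 * path uses this pair; error handling elided):
 *
 *	dax_entry_t cookie = dax_lock_page(page);
 *	if (!cookie)
 *		return -EBUSY;	// entry could not be locked
 *	// page->mapping and page->index are stable here
 *	dax_unlock_page(page, cookie);
 */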
/*
 * Find page cache entry at given index. If it is a DAX entry, return it
 * with the entry locked. If the page cache doesn't contain an entry at
 * that index, add a locked empty entry.
 *
 * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return VM_FAULT_FALLBACK.
 * This will happen if there are any PTE entries within the PMD range
 * that we are requesting.
 *
 * We always favor PTE entries over PMD entries. There isn't a flow where we
 * evict PTE entries in order to 'upgrade' them to a PMD entry.  A PMD
 * insertion will fail if it finds any PTE entries already in the tree, and a
 * PTE insertion will cause an existing PMD entry to be unmapped and
 * downgraded to PTE entries.  This happens for both PMD zero pages as
 * well as PMD empty entries.
 *
 * The exception to this downgrade path is for PMD entries that have
 * real storage backing them.  We will leave these real PMD entries in
 * the tree, and PTE writes will simply dirty the entire PMD entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 *
 * On error, this function does not return an ERR_PTR.  Instead it returns
 * a VM_FAULT code, encoded as an xarray internal entry.  The ERR_PTR values
 * overlap with xarray value entries.
 */
static void *grab_mapping_entry(struct xa_state *xas,
		struct address_space *mapping, unsigned long size_flag)
{
	unsigned long index = xas->xa_index;
	bool pmd_downgrade = false; /* splitting PMD entry into PTE entries? */
	void *entry;

retry:
	xas_lock_irq(xas);
	entry = get_unlocked_entry(xas);

	if (entry) {
		if (!xa_is_value(entry)) {
			xas_set_err(xas, EIO);
			goto out_unlock;
		}

		if (size_flag & DAX_PMD) {
			if (dax_is_pte_entry(entry)) {
				put_unlocked_entry(xas, entry);
				goto fallback;
			}
		} else { /* trying to grab a PTE entry */
			if (dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	if (pmd_downgrade) {
		/*
		 * Make sure 'entry' remains valid while we drop
		 * the i_pages lock.
		 */
		dax_lock_entry(xas, entry);

		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (dax_is_zero_entry(entry)) {
			xas_unlock_irq(xas);
			unmap_mapping_pages(mapping,
					xas->xa_index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
			xas_reset(xas);
			xas_lock_irq(xas);
		}

		dax_disassociate_entry(entry, mapping, false);
		xas_store(xas, NULL);	/* undo the PMD join */
		dax_wake_entry(xas, entry, true);
		mapping->nrexceptional--;
		entry = NULL;
		xas_set(xas, index);
	}

	if (entry) {
		dax_lock_entry(xas, entry);
	} else {
		entry = dax_make_entry(pfn_to_pfn_t(0), size_flag | DAX_EMPTY);
		dax_lock_entry(xas, entry);
		if (xas_error(xas))
			goto out_unlock;
		mapping->nrexceptional++;
	}

out_unlock:
	xas_unlock_irq(xas);
	if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
		goto retry;
	if (xas->xa_node == XA_ERROR(-ENOMEM))
		return xa_mk_internal(VM_FAULT_OOM);
	if (xas_error(xas))
		return xa_mk_internal(VM_FAULT_SIGBUS);
	return entry;
fallback:
	xas_unlock_irq(xas);
	return xa_mk_internal(VM_FAULT_FALLBACK);
}
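/*
 * Callers tell a real (locked) entry apart from an encoded fault code
 * like so (this is the pattern used by the fault handlers below):
 *
 *	entry = grab_mapping_entry(&xas, mapping, 0);
 *	if (xa_is_internal(entry))
 *		return xa_to_internal(entry);	// a VM_FAULT_* code
 */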
/**
 * dax_layout_busy_page - find first pinned page in @mapping
 * @mapping: address space to scan for a page with ref count > 1
 *
 * DAX requires ZONE_DEVICE mapped pages. These pages are never
 * 'onlined' to the page allocator so they are considered idle when
 * page->count == 1. A filesystem uses this interface to determine if
 * any page in the mapping is busy, i.e. for DMA, or other
 * get_user_pages() usages.
 *
 * It is expected that the filesystem is holding locks to block the
 * establishment of new mappings in this address_space. I.e. it expects
 * to be able to run unmap_mapping_range() and subsequently not race
 * mapping_mapped() becoming true.
 */
struct page *dax_layout_busy_page(struct address_space *mapping)
{
	XA_STATE(xas, &mapping->i_pages, 0);
	void *entry;
	unsigned int scanned = 0;
	struct page *page = NULL;

	/*
	 * In the 'limited' case get_user_pages() for dax is disabled.
	 */
	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return NULL;

	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
		return NULL;

	/*
	 * If we race get_user_pages_fast() here either we'll see the
	 * elevated page count in the iteration and wait, or
	 * get_user_pages_fast() will see that the page it took a reference
	 * against is no longer mapped in the page tables and bail to the
	 * get_user_pages() slow path.  The slow path is protected by
	 * pte_lock() and pmd_lock(). New references are not taken without
	 * holding those locks, and unmap_mapping_range() will not zero the
	 * pte or pmd without holding the respective lock, so we are
	 * guaranteed to either see new references or prevent new
	 * references from being established.
	 */
	unmap_mapping_range(mapping, 0, 0, 1);

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (WARN_ON_ONCE(!xa_is_value(entry)))
			continue;
		if (unlikely(dax_is_locked(entry)))
			entry = get_unlocked_entry(&xas);
		if (entry)
			page = dax_busy_page(entry);
		put_unlocked_entry(&xas, entry);
		if (page)
			break;
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	return page;
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page);
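/*
 * Sketch of the expected filesystem usage (illustrative, modelled on the
 * break-layouts pattern; the waiting primitive is up to the caller):
 *
 *	// caller holds locks blocking establishment of new mappings
 *	while ((page = dax_layout_busy_page(mapping))) {
 *		// drop locks, wait for page_ref_count(page) to hit 1,
 *		// then re-take locks and re-scan
 *	}
 */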
static int __dax_invalidate_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	XA_STATE(xas, &mapping->i_pages, index);
	int ret = 0;
	void *entry;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas);
	if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
		goto out;
	if (!trunc &&
	    (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
	     xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
		goto out;
	dax_disassociate_entry(entry, mapping, trunc);
	xas_store(&xas, NULL);
	mapping->nrexceptional--;
	ret = 1;
out:
	put_unlocked_entry(&xas, entry);
	xas_unlock_irq(&xas);
	return ret;
}

/*
 * Delete DAX entry at @index from @mapping.  Wait for it
 * to be unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * page cache (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen a DAX entry for this index, we better find it
	 * at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_entry(mapping, index, false);
}

static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
		sector_t sector, size_t size, struct page *to,
		unsigned long vaddr)
{
	void *vto, *kaddr;
	pgoff_t pgoff;
	long rc;
	int id;

	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, NULL);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}
/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_entry(struct xa_state *xas,
		struct address_space *mapping, struct vm_fault *vmf,
		void *entry, pfn_t pfn, unsigned long flags, bool dirty)
{
	void *new_entry = dax_make_entry(pfn, flags);

	if (dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) {
		unsigned long index = xas->xa_index;
		/* we are replacing a zero page with block mapping */
		if (dax_is_pmd_entry(entry))
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
		else /* pte entry */
			unmap_mapping_pages(mapping, index, 1, false);
	}

	xas_reset(xas);
	xas_lock_irq(xas);
	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		void *old;

		dax_disassociate_entry(entry, mapping, false);
		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
		/*
		 * Only swap our new entry into the page cache if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the cache, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		old = dax_lock_entry(xas, new_entry);
		WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
					DAX_LOCKED));
		entry = new_entry;
	} else {
		xas_load(xas);	/* Walk the xa_state */
	}

	if (dirty)
		xas_set_mark(xas, PAGECACHE_TAG_DIRTY);

	xas_unlock_irq(xas);
	return entry;
}

static inline
unsigned long pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}

/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
		unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		struct mmu_notifier_range range;
		unsigned long address;

		cond_resched();

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);

		/*
		 * Note because we provide range to follow_pte_pmd it will
		 * call mmu_notifier_invalidate_range_start() on our behalf
		 * before taking any lock.
		 */
		if (follow_pte_pmd(vma->vm_mm, address, &range,
				   &ptep, &pmdp, &ptl))
			continue;

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection not changing it to point
		 * to a new page.
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_invalidate(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
unlock_pmd:
#endif
			spin_unlock(ptl);
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		mmu_notifier_invalidate_range_end(&range);
	}
	i_mmap_unlock_read(mapping);
}
static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
		struct address_space *mapping, void *entry)
{
	unsigned long pfn, index, count;
	long ret = 0;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!xa_is_value(entry)))
		return -EIO;

	if (unlikely(dax_is_locked(entry))) {
		void *old_entry = entry;

		entry = get_unlocked_entry(xas);

		/* Entry got punched out / reallocated? */
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
			goto put_unlocked;
		/*
		 * Entry got reallocated elsewhere? No need to writeback.
		 * We have to compare pfns as we must not bail out due to
		 * difference in lockbit or entry type.
		 */
		if (dax_to_pfn(old_entry) != dax_to_pfn(entry))
			goto put_unlocked;
		if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
					dax_is_zero_entry(entry))) {
			ret = -EIO;
			goto put_unlocked;
		}

		/* Another fsync thread may have already done this entry */
		if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
			goto put_unlocked;
	}

	/* Lock the entry to serialize with page faults */
	dax_lock_entry(xas, entry);

	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under the i_pages lock and once they do that
	 * they will see the entry locked and wait for it to unlock.
	 */
	xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
	xas_unlock_irq(xas);

	/*
	 * If dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we use needs to be
	 * aligned to the start of the PMD.
	 * This allows us to flush for PMD_SIZE and not have to worry about
	 * partial PMD writebacks.
	 */
	pfn = dax_to_pfn(entry);
	count = 1UL << dax_entry_order(entry);
	index = xas->xa_index & ~(count - 1);

	dax_entry_mkclean(mapping, index, pfn);
	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	xas_reset(xas);
	xas_lock_irq(xas);
	xas_store(xas, entry);
	xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
	dax_wake_entry(xas, entry, false);

	trace_dax_writeback_one(mapping->host, index, count);
	return ret;

put_unlocked:
	put_unlocked_entry(xas, entry);
	return ret;
}
/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
	struct inode *inode = mapping->host;
	pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
	struct dax_device *dax_dev;
	void *entry;
	int ret = 0;
	unsigned int scanned = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev)
		return -EIO;

	trace_dax_writeback_range(inode, xas.xa_index, end_index);

	tag_pages_for_writeback(mapping, xas.xa_index, end_index);

	xas_lock_irq(&xas);
	xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
		ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
		if (ret < 0) {
			mapping_set_error(mapping, ret);
			break;
		}
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	put_dax(dax_dev);
	trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
	return ret;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
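/*
 * Filesystems wire this up from their ->writepages method; a minimal
 * sketch (foo_dax_writepages is hypothetical, but this mirrors how the
 * in-tree DAX filesystems of this era call it):
 *
 *	static int foo_dax_writepages(struct address_space *mapping,
 *				      struct writeback_control *wbc)
 *	{
 *		return dax_writeback_mapping_range(mapping,
 *				mapping->host->i_sb->s_bdev, wbc);
 *	}
 */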
static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
}

static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
			 pfn_t *pfnp)
{
	const sector_t sector = dax_iomap_sector(iomap, pos);
	pgoff_t pgoff;
	int id, rc;
	long length;

	rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
	if (rc)
		return rc;
	id = dax_read_lock();
	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
				   NULL, pfnp);
	if (length < 0) {
		rc = length;
		goto out;
	}
	rc = -EINVAL;
	if (PFN_PHYS(length) < size)
		goto out;
	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
		goto out;
	/* For larger pages we need devmap */
	if (length > 1 && !pfn_t_devmap(*pfnp))
		goto out;
	rc = 0;
out:
	dax_read_unlock(id);
	return rc;
}

/*
 * The user has performed a load from a hole in the file.  Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
 */
static vm_fault_t dax_load_hole(struct xa_state *xas,
		struct address_space *mapping, void **entry,
		struct vm_fault *vmf)
{
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
	vm_fault_t ret;

	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
			DAX_ZERO_PAGE, false);

	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}

static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}

int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int size)
{
	if (dax_range_is_aligned(bdev, offset, size)) {
		sector_t start_sector = sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				size >> 9, GFP_NOFS, 0);
	} else {
		pgoff_t pgoff;
		long rc, id;
		void *kaddr;

		rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
		if (rc)
			return rc;

		id = dax_read_lock();
		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
		if (rc < 0) {
			dax_read_unlock(id);
			return rc;
		}
		memset(kaddr + offset, 0, size);
		dax_flush(dax_dev, kaddr + offset, size);
		dax_read_unlock(id);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);
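/*
 * Worked example for the alignment check above (illustrative): with a
 * 512-byte logical block size, zeroing offset=0 size=1024 is pushed down
 * to blkdev_issue_zeroout(), while offset=100 size=200 is unaligned and
 * takes the memset() + dax_flush() path instead.
 */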
static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct block_device *bdev = iomap->bdev;
	struct dax_device *dax_dev = iomap->dax_dev;
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;
	size_t xfer;
	int id;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/*
	 * Write can allocate block for an area which has a hole page mapped
	 * into page tables. We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
	 */
	if (iomap->flags & IOMAP_F_NEW) {
		invalidate_inode_pages2_range(inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	id = dax_read_lock();
	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		const size_t size = ALIGN(length + offset, PAGE_SIZE);
		const sector_t sector = dax_iomap_sector(iomap, pos);
		ssize_t map_len;
		pgoff_t pgoff;
		void *kaddr;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
		if (ret)
			break;

		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
				&kaddr, NULL);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		map_len = PFN_PHYS(map_len);
		kaddr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		/*
		 * The userspace address for the memory copy has already been
		 * validated via access_ok() in either vfs_read() or
		 * vfs_write(), depending on which operation we are doing.
		 */
		if (iov_iter_rw(iter) == WRITE)
			xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
					map_len, iter);
		else
			xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
					map_len, iter);

		pos += xfer;
		length -= xfer;
		done += xfer;

		if (xfer == 0)
			ret = -EFAULT;
		if (xfer < map_len)
			break;
	}
	dax_read_unlock(id);

	return done ? done : ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:	The control block for this I/O
 * @iter:	The addresses to do I/O from or to
 * @ops:	iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_write(&inode->i_rwsem);
		flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&inode->i_rwsem);
	}

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);
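/*
 * Minimal sketch of a filesystem read path built on dax_iomap_rw()
 * (foo_iomap_ops and foo_dax_read_iter are hypothetical; real callers
 * also handle IOCB_NOWAIT and verify the inode is actually DAX):
 *
 *	static ssize_t foo_dax_read_iter(struct kiocb *iocb,
 *					 struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = dax_iomap_rw(iocb, to, &foo_iomap_ops);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 */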
static vm_fault_t dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	return vmf_error(error);
}

/*
 * MAP_SYNC on a dax mapping guarantees dirty metadata is
 * flushed on write-faults (non-cow), but not read-faults.
 */
static bool dax_fault_is_synchronous(unsigned long flags,
		struct vm_area_struct *vma, struct iomap *iomap)
{
	return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
		&& (iomap->flags & IOMAP_F_DIRTY);
}
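/*
 * Illustrative consequence: a write fault on a MAP_SYNC mapping whose
 * block allocation is not yet persisted (IOMAP_F_DIRTY set) must not
 * install a writeable PTE/PMD right away; the handlers below return
 * VM_FAULT_NEEDDSYNC so the filesystem can fsync the metadata first and
 * then complete the fault via dax_finish_sync_fault().
 */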
static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       int *iomap_errp, const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	struct iomap iomap = { 0 };
	unsigned flags = IOMAP_FAULT;
	int error, major = 0;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	vm_fault_t ret = 0;
	void *entry;
	pfn_t pfn;

	trace_dax_pte_fault(inode, vmf, ret);
	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	if (pos >= i_size_read(inode)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if (write && !vmf->cow_page)
		flags |= IOMAP_WRITE;

	entry = grab_mapping_entry(&xas, mapping, 0);
	if (xa_is_internal(entry)) {
		ret = xa_to_internal(entry);
		goto out;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PMD fault that overlaps with
	 * the PTE we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
		ret = VM_FAULT_NOPAGE;
		goto unlock_entry;
	}

	/*
	 * Note that we don't bother to use iomap_apply here: DAX requires
	 * the file system block size to be equal to the page size, which means
	 * that we never have to deal with more than a single extent here.
	 */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
	if (iomap_errp)
		*iomap_errp = error;
	if (error) {
		ret = dax_fault_return(error);
		goto unlock_entry;
	}
	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
		error = -EIO;	/* fs corruption? */
		goto error_finish_iomap;
	}

	if (vmf->cow_page) {
		sector_t sector = dax_iomap_sector(&iomap, pos);

		switch (iomap.type) {
		case IOMAP_HOLE:
		case IOMAP_UNWRITTEN:
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
			error = copy_user_dax(iomap.bdev, iomap.dax_dev,
					sector, PAGE_SIZE, vmf->cow_page, vaddr);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		}

		if (error)
			goto error_finish_iomap;

		__SetPageUptodate(vmf->cow_page);
		ret = finish_fault(vmf);
		if (!ret)
			ret = VM_FAULT_DONE_COW;
		goto finish_iomap;
	}

	sync = dax_fault_is_synchronous(flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomap.flags & IOMAP_F_NEW) {
			count_vm_event(PGMAJFAULT);
			count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
		}
		error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
		if (error < 0)
			goto error_finish_iomap;

		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
						 0, write && !sync);

		/*
		 * If we are doing synchronous page fault and inode needs fsync,
		 * we can insert PTE into page tables only after that happens.
		 * Skip insertion for now and return the pfn so that caller can
		 * insert it after fsync is done.
		 */
		if (sync) {
			if (WARN_ON_ONCE(!pfnp)) {
				error = -EIO;
				goto error_finish_iomap;
			}
			*pfnp = pfn;
			ret = VM_FAULT_NEEDDSYNC | major;
			goto finish_iomap;
		}
		trace_dax_insert_mapping(inode, vmf, entry);
		if (write)
			ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
		else
			ret = vmf_insert_mixed(vma, vaddr, pfn);

		goto finish_iomap;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (!write) {
			ret = dax_load_hole(&xas, mapping, &entry, vmf);
			goto finish_iomap;
		}
		/*FALLTHRU*/
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

error_finish_iomap:
	ret = dax_fault_return(error);
finish_iomap:
	if (ops->iomap_end) {
		int copied = PAGE_SIZE;

		if (ret & VM_FAULT_ERROR)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PTE we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
	}
unlock_entry:
	dax_unlock_entry(&xas, entry);
out:
	trace_dax_pte_fault_done(inode, vmf, ret);
	return ret | major;
}
#ifdef CONFIG_FS_DAX_PMD
static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
		struct iomap *iomap, void **entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = mapping->host;
	pgtable_t pgtable = NULL;
	struct page *zero_page;
	spinlock_t *ptl;
	pmd_t pmd_entry;
	pfn_t pfn;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	pfn = page_to_pfn_t(zero_page);
	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
			DAX_PMD | DAX_ZERO_PAGE, false);

	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(vma->vm_mm);
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		spin_unlock(ptl);
		goto fallback;
	}

	if (pgtable) {
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		mm_inc_nr_ptes(vma->vm_mm);
	}
	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
	return VM_FAULT_NOPAGE;

fallback:
	if (pgtable)
		pte_free(vma->vm_mm, pgtable);
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
	return VM_FAULT_FALLBACK;
}
static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
	struct inode *inode = mapping->host;
	vm_fault_t result = VM_FAULT_FALLBACK;
	struct iomap iomap = { 0 };
	pgoff_t max_pgoff;
	void *entry;
	loff_t pos;
	int error;
	pfn_t pfn;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is
	 * supposed to hold locks serializing us with truncate / punch hole so
	 * this is a reliable test.
	 */
	max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);

	/*
	 * Make sure that the faulting address's PMD offset (color) matches
	 * the PMD offset from the start of the file.  This is necessary so
	 * that a PMD range in the page table overlaps exactly with a PMD
	 * range in the page cache.
	 */
	if ((vmf->pgoff & PG_PMD_COLOUR) !=
	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
		goto fallback;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		goto fallback;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		goto fallback;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		goto fallback;

	if (xas.xa_index >= max_pgoff) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}

	/* If the PMD would extend beyond the file size */
	if ((xas.xa_index | PG_PMD_COLOUR) >= max_pgoff)
		goto fallback;

	/*
	 * grab_mapping_entry() will make sure we get an empty PMD entry,
	 * a zero PMD entry or a DAX PMD.  If it can't (because a PTE
	 * entry is already in the array, for instance), it will return
	 * VM_FAULT_FALLBACK.
	 */
	entry = grab_mapping_entry(&xas, mapping, DAX_PMD);
	if (xa_is_internal(entry)) {
		result = xa_to_internal(entry);
		goto fallback;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PTE fault that overlaps with
	 * the PMD we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
			!pmd_devmap(*vmf->pmd)) {
		result = 0;
		goto unlock_entry;
	}

	/*
	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
	 * setting up a mapping, so really we're using iomap_begin() as a way
	 * to look up our filesystem block.
	 */
	pos = (loff_t)xas.xa_index << PAGE_SHIFT;
	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
	if (error)
		goto unlock_entry;

	if (iomap.offset + iomap.length < pos + PMD_SIZE)
		goto finish_iomap;

	sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
		if (error < 0)
			goto finish_iomap;

		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
						DAX_PMD, write && !sync);

		/*
		 * If we are doing synchronous page fault and inode needs fsync,
		 * we can insert PMD into page tables only after that happens.
		 * Skip insertion for now and return the pfn so that caller can
		 * insert it after fsync is done.
		 */
		if (sync) {
			if (WARN_ON_ONCE(!pfnp))
				goto finish_iomap;
			*pfnp = pfn;
			result = VM_FAULT_NEEDDSYNC;
			goto finish_iomap;
		}

		trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
		result = vmf_insert_pfn_pmd(vmf, pfn, write);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(write))
			break;
		result = dax_pmd_load_hole(&xas, vmf, &iomap, &entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

finish_iomap:
	if (ops->iomap_end) {
		int copied = PMD_SIZE;

		if (result == VM_FAULT_FALLBACK)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PMD we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
				&iomap);
	}
unlock_entry:
	dax_unlock_entry(&xas, entry);
fallback:
	if (result == VM_FAULT_FALLBACK) {
		split_huge_pmd(vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
	return result;
}
#else
static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */

/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vmf: The description of the fault
 * @pe_size: Size of the page to fault in
 * @pfnp: PFN to insert for synchronous faults if fsync is required
 * @iomap_errp: Storage for detailed error code in case of error
 * @ops: Iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in
 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
 * has done all the necessary locking for page fault to proceed
 * successfully.
 */
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
{
	switch (pe_size) {
	case PE_SIZE_PTE:
		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
	case PE_SIZE_PMD:
		return dax_iomap_pmd_fault(vmf, pfnp, ops);
	default:
		return VM_FAULT_FALLBACK;
	}
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);
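/*
 * Typical wiring in a filesystem's vm_operations_struct (a sketch; the
 * foo_* names are hypothetical, but the shape mirrors the in-tree DAX
 * filesystems of this era):
 *
 *	static vm_fault_t foo_dax_fault(struct vm_fault *vmf)
 *	{
 *		return dax_iomap_fault(vmf, PE_SIZE_PTE, NULL, NULL,
 *				       &foo_iomap_ops);
 *	}
 *
 *	static const struct vm_operations_struct foo_dax_vm_ops = {
 *		.fault		= foo_dax_fault,
 *		.huge_fault	= foo_dax_huge_fault,
 *		.page_mkwrite	= foo_dax_fault,
 *		.pfn_mkwrite	= foo_dax_fault,
 *	};
 */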
/*
 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
 * @vmf: The description of the fault
 * @pfn: PFN to insert
 * @order: Order of entry to insert.
 *
 * This function inserts a writeable PTE or PMD entry into the page tables
 * for an mmaped DAX file.  It also marks the page cache entry as dirty.
 */
static vm_fault_t
dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
	void *entry;
	vm_fault_t ret;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas);
	/* Did we race with someone splitting entry or so? */
	if (!entry ||
	    (order == 0 && !dax_is_pte_entry(entry)) ||
	    (order == PMD_ORDER && !dax_is_pmd_entry(entry))) {
		put_unlocked_entry(&xas, entry);
		xas_unlock_irq(&xas);
		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
						      VM_FAULT_NOPAGE);
		return VM_FAULT_NOPAGE;
	}
	xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
	dax_lock_entry(&xas, entry);
	xas_unlock_irq(&xas);
	if (order == 0)
		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
#ifdef CONFIG_FS_DAX_PMD
	else if (order == PMD_ORDER)
		ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
#endif
	else
		ret = VM_FAULT_FALLBACK;
	dax_unlock_entry(&xas, entry);
	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
	return ret;
}

/**
 * dax_finish_sync_fault - finish synchronous page fault
 * @vmf: The description of the fault
 * @pe_size: Size of entry to be inserted
 * @pfn: PFN to insert
 *
 * This function ensures that the file range touched by the page fault is
 * stored persistently on the media and handles inserting of appropriate page
 * table entry.
 */
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size, pfn_t pfn)
{
	int err;
	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
	unsigned int order = pe_order(pe_size);
	size_t len = PAGE_SIZE << order;

	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
	if (err)
		return VM_FAULT_SIGBUS;
	return dax_insert_pfn_mkwrite(vmf, pfn, order);
}
EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
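/*
 * Illustrative synchronous-fault flow in a filesystem fault handler
 * (sketch only; foo_iomap_ops is hypothetical, the shape follows the
 * in-tree users of dax_finish_sync_fault()):
 *
 *	static vm_fault_t foo_dax_huge_fault(struct vm_fault *vmf,
 *					     enum page_entry_size pe_size)
 *	{
 *		pfn_t pfn;
 *		vm_fault_t ret;
 *
 *		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
 *				      &foo_iomap_ops);
 *		if (ret & VM_FAULT_NEEDDSYNC)
 *			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
 *		return ret;
 *	}
 */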