// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include <asm/pgalloc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

static inline unsigned int pe_order(enum page_entry_size pe_size)
{
	if (pe_size == PE_SIZE_PTE)
		return PAGE_SHIFT - PAGE_SHIFT;
	if (pe_size == PE_SIZE_PMD)
		return PMD_SHIFT - PAGE_SHIFT;
	if (pe_size == PE_SIZE_PUD)
		return PUD_SHIFT - PAGE_SHIFT;
	return ~0;
}

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

/* The 'colour' (ie low bits) within a PMD of a page offset. */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)

/* The order of a PMD entry */
#define PMD_ORDER	(PMD_SHIFT - PAGE_SHIFT)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

/*
 * DAX pagecache entries use XArray value entries so they can't be mistaken
 * for pages. We use one bit for locking, one bit for the entry size (PMD)
 * and two more to tell us if the entry is a zero page or an empty entry that
 * is just used for locking. In total four special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
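 *
 * For illustration, a PTE-sized entry for pfn 0x1234 is encoded roughly as
 * (0x1234 << DAX_SHIFT) | flags, where the flag bits are:
 *
 *	bit 0: DAX_LOCKED	bit 2: DAX_ZERO_PAGE
 *	bit 1: DAX_PMD		bit 3: DAX_EMPTY
 *	bits 4 and up: pfn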
 */
#define DAX_SHIFT	(4)
#define DAX_LOCKED	(1UL << 0)
#define DAX_PMD		(1UL << 1)
#define DAX_ZERO_PAGE	(1UL << 2)
#define DAX_EMPTY	(1UL << 3)

static unsigned long dax_to_pfn(void *entry)
{
	return xa_to_value(entry) >> DAX_SHIFT;
}

static void *dax_make_entry(pfn_t pfn, unsigned long flags)
{
	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
}

static bool dax_is_locked(void *entry)
{
	return xa_to_value(entry) & DAX_LOCKED;
}

static unsigned int dax_entry_order(void *entry)
{
	if (xa_to_value(entry) & DAX_PMD)
		return PMD_ORDER;
	return 0;
}

static unsigned long dax_is_pmd_entry(void *entry)
{
	return xa_to_value(entry) & DAX_PMD;
}

static bool dax_is_pte_entry(void *entry)
{
	return !(xa_to_value(entry) & DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return xa_to_value(entry) & DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
	return xa_to_value(entry) & DAX_EMPTY;
}

/*
 * true if the entry that was found is of a smaller order than the entry
 * we were looking for
 */
static bool dax_is_conflict(void *entry)
{
	return entry == XA_RETRY_ENTRY;
}

/*
 * DAX page cache entry locking
 */
struct exceptional_entry_key {
	struct xarray *xa;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};

static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
		void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;
	unsigned long index = xas->xa_index;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD. This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~PG_PMD_COLOUR;
	key->xa = xas->xa;
	key->entry_start = index;

	hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}

static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
		unsigned int mode, int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->xa != ewait->key.xa ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * @entry may no longer be the entry at the index in the mapping.
 * The important information it's conveying is whether the entry at
 * this index used to be a PMD entry.
 */
static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(xas, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under the i_pages lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, wake_all ?
				0 : 1, &key);
}

/*
 * Look up entry in page cache, wait for it to become unlocked if it
 * is a DAX entry and return it. The caller must subsequently call
 * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
 * if it did. The entry returned may have a larger order than @order.
 * If @order is larger than the order of the entry found in i_pages, this
 * function returns a dax_is_conflict entry.
 *
 * Must be called with the i_pages lock held.
 */
static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
{
	void *entry;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = xas_find_conflict(xas);
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
			return entry;
		if (dax_entry_order(entry) < order)
			return XA_RETRY_ENTRY;
		if (!dax_is_locked(entry))
			return entry;

		wq = dax_entry_waitqueue(xas, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		xas_unlock_irq(xas);
		xas_reset(xas);
		schedule();
		finish_wait(wq, &ewait.wait);
		xas_lock_irq(xas);
	}
}

/*
 * The only thing keeping the address space around is the i_pages lock
 * (it's cycled in clear_inode() after removing the entries from i_pages)
 * After we call xas_unlock_irq(), we cannot touch xas->xa.
 */
static void wait_entry_unlocked(struct xa_state *xas, void *entry)
{
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
	/*
	 * Unlike get_unlocked_entry() there is no guarantee that this
	 * path ever successfully retrieves an unlocked entry before an
	 * inode dies. Perform a non-exclusive wait in case this path
	 * never successfully performs its own wake up.
	 */
	prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
	xas_unlock_irq(xas);
	schedule();
	finish_wait(wq, &ewait.wait);
}

static void put_unlocked_entry(struct xa_state *xas, void *entry)
{
	/* If we were the only waiter woken, wake the next one */
	if (entry && !dax_is_conflict(entry))
		dax_wake_entry(xas, entry, false);
}

/*
 * We used the xa_state to get the entry, but then we locked the entry and
 * dropped the xa_lock, so we know the xa_state is stale and must be reset
 * before use.
 */
static void dax_unlock_entry(struct xa_state *xas, void *entry)
{
	void *old;

	BUG_ON(dax_is_locked(entry));
	xas_reset(xas);
	xas_lock_irq(xas);
	old = xas_store(xas, entry);
	xas_unlock_irq(xas);
	BUG_ON(!dax_is_locked(old));
	dax_wake_entry(xas, entry, false);
}

/*
 * Return: The entry stored at this location before it was locked.
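 *
 * Must be called with the xa_lock held. The entry is locked simply by
 * OR-ing DAX_LOCKED into the stored value; dax_unlock_entry() later stores
 * the unlocked value back and wakes any waiters.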
 */
static void *dax_lock_entry(struct xa_state *xas, void *entry)
{
	unsigned long v = xa_to_value(entry);
	return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
}

static unsigned long dax_entry_size(void *entry)
{
	if (dax_is_zero_entry(entry))
		return 0;
	else if (dax_is_empty_entry(entry))
		return 0;
	else if (dax_is_pmd_entry(entry))
		return PMD_SIZE;
	else
		return PAGE_SIZE;
}

static unsigned long dax_end_pfn(void *entry)
{
	return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
}

/*
 * Iterate through all mapped pfns represented by an entry, i.e. skip
 * 'empty' and 'zero' entries.
 */
#define for_each_mapped_pfn(entry, pfn) \
	for (pfn = dax_to_pfn(entry); \
			pfn < dax_end_pfn(entry); pfn++)

/*
 * TODO: for reflink+dax we need a way to associate a single page with
 * multiple address_space instances at different linear_page_index()
 * offsets.
 */
static void dax_associate_entry(void *entry, struct address_space *mapping,
		struct vm_area_struct *vma, unsigned long address)
{
	unsigned long size = dax_entry_size(entry), pfn, index;
	int i = 0;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	index = linear_page_index(vma, address & ~(size - 1));
	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(page->mapping);
		page->mapping = mapping;
		page->index = index + i++;
	}
}

static void dax_disassociate_entry(void *entry, struct address_space *mapping,
		bool trunc)
{
	unsigned long pfn;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
		WARN_ON_ONCE(page->mapping && page->mapping != mapping);
		page->mapping = NULL;
		page->index = 0;
	}
}

static struct page *dax_busy_page(void *entry)
{
	unsigned long pfn;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		if (page_ref_count(page) > 1)
			return page;
	}
	return NULL;
}

/*
 * dax_lock_page - Lock the DAX entry corresponding to a page
 * @page: The page whose entry we want to lock
 *
 * Context: Process context.
 * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
 * not be locked.
 */
dax_entry_t dax_lock_page(struct page *page)
{
	XA_STATE(xas, NULL, 0);
	void *entry;

	/* Ensure page->mapping isn't freed while we look at it */
	rcu_read_lock();
	for (;;) {
		struct address_space *mapping = READ_ONCE(page->mapping);

		entry = NULL;
		if (!mapping || !dax_mapping(mapping))
			break;

		/*
		 * In the device-dax case there's no need to lock, a
		 * struct dev_pagemap pin is sufficient to keep the
		 * inode alive, and we assume we have dev_pagemap pin
		 * otherwise we would not have a valid pfn_to_page()
		 * translation.
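		 * In that case the entry is left unlocked and a non-zero
		 * cookie (~0UL) is returned; dax_unlock_page() likewise
		 * skips the unlock for S_ISCHR inodes.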
		 */
		entry = (void *)~0UL;
		if (S_ISCHR(mapping->host->i_mode))
			break;

		xas.xa = &mapping->i_pages;
		xas_lock_irq(&xas);
		if (mapping != page->mapping) {
			xas_unlock_irq(&xas);
			continue;
		}
		xas_set(&xas, page->index);
		entry = xas_load(&xas);
		if (dax_is_locked(entry)) {
			rcu_read_unlock();
			wait_entry_unlocked(&xas, entry);
			rcu_read_lock();
			continue;
		}
		dax_lock_entry(&xas, entry);
		xas_unlock_irq(&xas);
		break;
	}
	rcu_read_unlock();
	return (dax_entry_t)entry;
}

void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
	struct address_space *mapping = page->mapping;
	XA_STATE(xas, &mapping->i_pages, page->index);

	if (S_ISCHR(mapping->host->i_mode))
		return;

	dax_unlock_entry(&xas, (void *)cookie);
}

/*
 * Find page cache entry at given index. If it is a DAX entry, return it
 * with the entry locked. If the page cache doesn't contain an entry at
 * that index, add a locked empty entry.
 *
 * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return VM_FAULT_FALLBACK.
 * This will happen if there are any PTE entries within the PMD range
 * that we are requesting.
 *
 * We always favor PTE entries over PMD entries. There isn't a flow where we
 * evict PTE entries in order to 'upgrade' them to a PMD entry. A PMD
 * insertion will fail if it finds any PTE entries already in the tree, and a
 * PTE insertion will cause an existing PMD entry to be unmapped and
 * downgraded to PTE entries. This happens for both PMD zero pages as
 * well as PMD empty entries.
 *
 * The exception to this downgrade path is for PMD entries that have
 * real storage backing them. We will leave these real PMD entries in
 * the tree, and PTE writes will simply dirty the entire PMD entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 *
 * On error, this function does not return an ERR_PTR. Instead it returns
 * a VM_FAULT code, encoded as an xarray internal entry. The ERR_PTR values
 * overlap with xarray value entries.
 */
static void *grab_mapping_entry(struct xa_state *xas,
		struct address_space *mapping, unsigned int order)
{
	unsigned long index = xas->xa_index;
	bool pmd_downgrade = false; /* splitting PMD entry into PTE entries? */
	void *entry;

retry:
	xas_lock_irq(xas);
	entry = get_unlocked_entry(xas, order);

	if (entry) {
		if (dax_is_conflict(entry))
			goto fallback;
		if (!xa_is_value(entry)) {
			xas_set_err(xas, -EIO);
			goto out_unlock;
		}

		if (order == 0) {
			if (dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	if (pmd_downgrade) {
		/*
		 * Make sure 'entry' remains valid while we drop
		 * the i_pages lock.
		 */
		dax_lock_entry(xas, entry);

		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (dax_is_zero_entry(entry)) {
			xas_unlock_irq(xas);
			unmap_mapping_pages(mapping,
					xas->xa_index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
			xas_reset(xas);
			xas_lock_irq(xas);
		}

		dax_disassociate_entry(entry, mapping, false);
		xas_store(xas, NULL);	/* undo the PMD join */
		dax_wake_entry(xas, entry, true);
		mapping->nrexceptional--;
		entry = NULL;
		xas_set(xas, index);
	}

	if (entry) {
		dax_lock_entry(xas, entry);
	} else {
		unsigned long flags = DAX_EMPTY;

		if (order > 0)
			flags |= DAX_PMD;
		entry = dax_make_entry(pfn_to_pfn_t(0), flags);
		dax_lock_entry(xas, entry);
		if (xas_error(xas))
			goto out_unlock;
		mapping->nrexceptional++;
	}

out_unlock:
	xas_unlock_irq(xas);
	if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
		goto retry;
	if (xas->xa_node == XA_ERROR(-ENOMEM))
		return xa_mk_internal(VM_FAULT_OOM);
	if (xas_error(xas))
		return xa_mk_internal(VM_FAULT_SIGBUS);
	return entry;
fallback:
	xas_unlock_irq(xas);
	return xa_mk_internal(VM_FAULT_FALLBACK);
}

/**
 * dax_layout_busy_page_range - find first pinned page in @mapping
 * @mapping: address space to scan for a page with ref count > 1
 * @start: Starting offset. Page containing 'start' is included.
 * @end: End offset. Page containing 'end' is included. If 'end' is LLONG_MAX,
 *       pages from 'start' till the end of file are included.
 *
 * DAX requires ZONE_DEVICE mapped pages. These pages are never
 * 'onlined' to the page allocator so they are considered idle when
 * page->count == 1. A filesystem uses this interface to determine if
 * any page in the mapping is busy, i.e. for DMA, or other
 * get_user_pages() usages.
 *
 * It is expected that the filesystem is holding locks to block the
 * establishment of new mappings in this address_space. I.e. it expects
 * to be able to run unmap_mapping_range() and subsequently not race
 * mapping_mapped() becoming true.
 */
struct page *dax_layout_busy_page_range(struct address_space *mapping,
					loff_t start, loff_t end)
{
	void *entry;
	unsigned int scanned = 0;
	struct page *page = NULL;
	pgoff_t start_idx = start >> PAGE_SHIFT;
	pgoff_t end_idx;
	XA_STATE(xas, &mapping->i_pages, start_idx);

	/*
	 * In the 'limited' case get_user_pages() for dax is disabled.
	 */
	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return NULL;

	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
		return NULL;

	/* If end == LLONG_MAX, all pages from start to till end of file */
	if (end == LLONG_MAX)
		end_idx = ULONG_MAX;
	else
		end_idx = end >> PAGE_SHIFT;
	/*
	 * If we race get_user_pages_fast() here either we'll see the
	 * elevated page count in the iteration and wait, or
	 * get_user_pages_fast() will see that the page it took a reference
	 * against is no longer mapped in the page tables and bail to the
	 * get_user_pages() slow path. The slow path is protected by
	 * pte_lock() and pmd_lock(). New references are not taken without
	 * holding those locks, and unmap_mapping_pages() will not zero the
	 * pte or pmd without holding the respective lock, so we are
	 * guaranteed to either see new references or prevent new
	 * references from being established.
	 */
	unmap_mapping_pages(mapping, start_idx, end_idx - start_idx + 1, 0);

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, end_idx) {
		if (WARN_ON_ONCE(!xa_is_value(entry)))
			continue;
		if (unlikely(dax_is_locked(entry)))
			entry = get_unlocked_entry(&xas, 0);
		if (entry)
			page = dax_busy_page(entry);
		put_unlocked_entry(&xas, entry);
		if (page)
			break;
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	return page;
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page_range);

struct page *dax_layout_busy_page(struct address_space *mapping)
{
	return dax_layout_busy_page_range(mapping, 0, LLONG_MAX);
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page);

static int __dax_invalidate_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	XA_STATE(xas, &mapping->i_pages, index);
	int ret = 0;
	void *entry;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas, 0);
	if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
		goto out;
	if (!trunc &&
	    (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
	     xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
		goto out;
	dax_disassociate_entry(entry, mapping, trunc);
	xas_store(&xas, NULL);
	mapping->nrexceptional--;
	ret = 1;
out:
	put_unlocked_entry(&xas, entry);
	xas_unlock_irq(&xas);
	return ret;
}

/*
 * Delete DAX entry at @index from @mapping. Wait for it
 * to be unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * page cache (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen a DAX entry for this index, we better find it
	 * at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_entry(mapping, index, false);
}

static int copy_cow_page_dax(struct block_device *bdev, struct dax_device *dax_dev,
			     sector_t sector, struct page *to, unsigned long vaddr)
{
	void *vto, *kaddr;
	pgoff_t pgoff;
	long rc;
	int id;

	rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(PAGE_SIZE), &kaddr, NULL);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs. If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
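 *
 * This takes and releases the i_pages lock internally and returns with the
 * (possibly replaced) entry still locked; the caller remains responsible
 * for the final dax_unlock_entry().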
 */
static void *dax_insert_entry(struct xa_state *xas,
		struct address_space *mapping, struct vm_fault *vmf,
		void *entry, pfn_t pfn, unsigned long flags, bool dirty)
{
	void *new_entry = dax_make_entry(pfn, flags);

	if (dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) {
		unsigned long index = xas->xa_index;
		/* we are replacing a zero page with block mapping */
		if (dax_is_pmd_entry(entry))
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
		else /* pte entry */
			unmap_mapping_pages(mapping, index, 1, false);
	}

	xas_reset(xas);
	xas_lock_irq(xas);
	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		void *old;

		dax_disassociate_entry(entry, mapping, false);
		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
		/*
		 * Only swap our new entry into the page cache if the current
		 * entry is a zero page or an empty entry. If a normal PTE or
		 * PMD entry is already in the cache, we leave it alone. This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		old = dax_lock_entry(xas, new_entry);
		WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
					DAX_LOCKED));
		entry = new_entry;
	} else {
		xas_load(xas);	/* Walk the xa_state */
	}

	if (dirty)
		xas_set_mark(xas, PAGECACHE_TAG_DIRTY);

	xas_unlock_irq(xas);
	return entry;
}

static inline
unsigned long pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}

/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
		unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		struct mmu_notifier_range range;
		unsigned long address;

		cond_resched();

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);

		/*
		 * Note because we provide range to follow_pte it will call
		 * mmu_notifier_invalidate_range_start() on our behalf before
		 * taking any lock.
		 */
		if (follow_pte(vma->vm_mm, address, &range, &ptep, &pmdp, &ptl))
			continue;

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection not changing it to point
		 * to a new page.
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_invalidate(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
unlock_pmd:
#endif
			spin_unlock(ptl);
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		mmu_notifier_invalidate_range_end(&range);
	}
	i_mmap_unlock_read(mapping);
}

static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
		struct address_space *mapping, void *entry)
{
	unsigned long pfn, index, count;
	long ret = 0;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!xa_is_value(entry)))
		return -EIO;

	if (unlikely(dax_is_locked(entry))) {
		void *old_entry = entry;

		entry = get_unlocked_entry(xas, 0);

		/* Entry got punched out / reallocated? */
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
			goto put_unlocked;
		/*
		 * Entry got reallocated elsewhere? No need to writeback.
		 * We have to compare pfns as we must not bail out due to
		 * difference in lockbit or entry type.
		 */
		if (dax_to_pfn(old_entry) != dax_to_pfn(entry))
			goto put_unlocked;
		if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
					dax_is_zero_entry(entry))) {
			ret = -EIO;
			goto put_unlocked;
		}

		/* Another fsync thread may have already done this entry */
		if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
			goto put_unlocked;
	}

	/* Lock the entry to serialize with page faults */
	dax_lock_entry(xas, entry);

	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under the i_pages lock and once they do that
	 * they will see the entry locked and wait for it to unlock.
	 */
	xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
	xas_unlock_irq(xas);

	/*
	 * If dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we use needs to be
	 * aligned to the start of the PMD.
	 * This allows us to flush for PMD_SIZE and not have to worry about
	 * partial PMD writebacks.
	 */
	pfn = dax_to_pfn(entry);
	count = 1UL << dax_entry_order(entry);
	index = xas->xa_index & ~(count - 1);

	dax_entry_mkclean(mapping, index, pfn);
	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	xas_reset(xas);
	xas_lock_irq(xas);
	xas_store(xas, entry);
	xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
	dax_wake_entry(xas, entry, false);

	trace_dax_writeback_one(mapping->host, index, count);
	return ret;

 put_unlocked:
	put_unlocked_entry(xas, entry);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc)
{
	XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
	struct inode *inode = mapping->host;
	pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
	void *entry;
	int ret = 0;
	unsigned int scanned = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	trace_dax_writeback_range(inode, xas.xa_index, end_index);

	tag_pages_for_writeback(mapping, xas.xa_index, end_index);

	xas_lock_irq(&xas);
	xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
		ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
		if (ret < 0) {
			mapping_set_error(mapping, ret);
			break;
		}
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
	return ret;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
}

static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
			 pfn_t *pfnp)
{
	const sector_t sector = dax_iomap_sector(iomap, pos);
	pgoff_t pgoff;
	int id, rc;
	long length;

	rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
	if (rc)
		return rc;
	id = dax_read_lock();
	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
				   NULL, pfnp);
	if (length < 0) {
		rc = length;
		goto out;
	}
	rc = -EINVAL;
	if (PFN_PHYS(length) < size)
		goto out;
	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
		goto out;
	/* For larger pages we need devmap */
	if (length > 1 && !pfn_t_devmap(*pfnp))
		goto out;
	rc = 0;
out:
	dax_read_unlock(id);
	return rc;
}

/*
 * The user has performed a load from a hole in the file. Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files. Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
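 *
 * The entry is tagged DAX_ZERO_PAGE so that a later write fault knows it
 * must unmap the zero page (see dax_insert_entry()) before installing the
 * real block mapping.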
 */
static vm_fault_t dax_load_hole(struct xa_state *xas,
		struct address_space *mapping, void **entry,
		struct vm_fault *vmf)
{
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
	vm_fault_t ret;

	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
			DAX_ZERO_PAGE, false);

	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}

s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
{
	sector_t sector = iomap_sector(iomap, pos & PAGE_MASK);
	pgoff_t pgoff;
	long rc, id;
	void *kaddr;
	bool page_aligned = false;
	unsigned offset = offset_in_page(pos);
	unsigned size = min_t(u64, PAGE_SIZE - offset, length);

	if (IS_ALIGNED(sector << SECTOR_SHIFT, PAGE_SIZE) &&
	    (size == PAGE_SIZE))
		page_aligned = true;

	rc = bdev_dax_pgoff(iomap->bdev, sector, PAGE_SIZE, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();

	if (page_aligned)
		rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
	else
		rc = dax_direct_access(iomap->dax_dev, pgoff, 1, &kaddr, NULL);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}

	if (!page_aligned) {
		memset(kaddr + offset, 0, size);
		dax_flush(iomap->dax_dev, kaddr + offset, size);
	}
	dax_read_unlock(id);
	return size;
}

static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap, struct iomap *srcmap)
{
	struct block_device *bdev = iomap->bdev;
	struct dax_device *dax_dev = iomap->dax_dev;
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;
	size_t xfer;
	int id;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/*
	 * Write can allocate block for an area which has a hole page mapped
	 * into page tables. We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
	 */
	if (iomap->flags & IOMAP_F_NEW) {
		invalidate_inode_pages2_range(inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	id = dax_read_lock();
	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		const size_t size = ALIGN(length + offset, PAGE_SIZE);
		const sector_t sector = dax_iomap_sector(iomap, pos);
		ssize_t map_len;
		pgoff_t pgoff;
		void *kaddr;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
		if (ret)
			break;

		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
				&kaddr, NULL);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		map_len = PFN_PHYS(map_len);
		kaddr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		/*
		 * The userspace address for the memory copy has already been
		 * validated via access_ok() in either vfs_read() or
		 * vfs_write(), depending on which operation we are doing.
		 */
		if (iov_iter_rw(iter) == WRITE)
			xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
					map_len, iter);
		else
			xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
					map_len, iter);

		pos += xfer;
		length -= xfer;
		done += xfer;

		if (xfer == 0)
			ret = -EFAULT;
		if (xfer < map_len)
			break;
	}
	dax_read_unlock(id);

	return done ? done : ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @iter: The addresses to do I/O from or to
 * @ops: iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory. The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_write(&inode->i_rwsem);
		flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&inode->i_rwsem);
	}

	if (iocb->ki_flags & IOCB_NOWAIT)
		flags |= IOMAP_NOWAIT;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);

static vm_fault_t dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	return vmf_error(error);
}

/*
 * MAP_SYNC on a dax mapping guarantees dirty metadata is
 * flushed on write-faults (non-cow), but not read-faults.
 */
static bool dax_fault_is_synchronous(unsigned long flags,
		struct vm_area_struct *vma, struct iomap *iomap)
{
	return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
		&& (iomap->flags & IOMAP_F_DIRTY);
}

static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       int *iomap_errp, const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	struct iomap iomap = { .type = IOMAP_HOLE };
	struct iomap srcmap = { .type = IOMAP_HOLE };
	unsigned flags = IOMAP_FAULT;
	int error, major = 0;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	vm_fault_t ret = 0;
	void *entry;
	pfn_t pfn;

	trace_dax_pte_fault(inode, vmf, ret);
	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	if (pos >= i_size_read(inode)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if (write && !vmf->cow_page)
		flags |= IOMAP_WRITE;

	entry = grab_mapping_entry(&xas, mapping, 0);
	if (xa_is_internal(entry)) {
		ret = xa_to_internal(entry);
		goto out;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PMD fault that overlaps with
	 * the PTE we need to set up. If so just return and the fault will be
	 * retried.
	 */
	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
		ret = VM_FAULT_NOPAGE;
		goto unlock_entry;
	}

	/*
	 * Note that we don't bother to use iomap_apply here: DAX requires
	 * the file system block size to be equal to the page size, which means
	 * that we never have to deal with more than a single extent here.
	 */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap, &srcmap);
	if (iomap_errp)
		*iomap_errp = error;
	if (error) {
		ret = dax_fault_return(error);
		goto unlock_entry;
	}
	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
		error = -EIO;	/* fs corruption? */
		goto error_finish_iomap;
	}

	if (vmf->cow_page) {
		sector_t sector = dax_iomap_sector(&iomap, pos);

		switch (iomap.type) {
		case IOMAP_HOLE:
		case IOMAP_UNWRITTEN:
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
			error = copy_cow_page_dax(iomap.bdev, iomap.dax_dev,
						  sector, vmf->cow_page, vaddr);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		}

		if (error)
			goto error_finish_iomap;

		__SetPageUptodate(vmf->cow_page);
		ret = finish_fault(vmf);
		if (!ret)
			ret = VM_FAULT_DONE_COW;
		goto finish_iomap;
	}

	sync = dax_fault_is_synchronous(flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomap.flags & IOMAP_F_NEW) {
			count_vm_event(PGMAJFAULT);
			count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
		}
		error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
		if (error < 0)
			goto error_finish_iomap;

		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
						 0, write && !sync);

		/*
		 * If we are doing synchronous page fault and inode needs fsync,
		 * we can insert PTE into page tables only after that happens.
		 * Skip insertion for now and return the pfn so that caller can
		 * insert it after fsync is done.
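		 * Filesystems typically do that via dax_finish_sync_fault(),
		 * which fsyncs the range and then installs the PTE with
		 * dax_insert_pfn_mkwrite().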
		 */
		if (sync) {
			if (WARN_ON_ONCE(!pfnp)) {
				error = -EIO;
				goto error_finish_iomap;
			}
			*pfnp = pfn;
			ret = VM_FAULT_NEEDDSYNC | major;
			goto finish_iomap;
		}
		trace_dax_insert_mapping(inode, vmf, entry);
		if (write)
			ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
		else
			ret = vmf_insert_mixed(vma, vaddr, pfn);

		goto finish_iomap;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (!write) {
			ret = dax_load_hole(&xas, mapping, &entry, vmf);
			goto finish_iomap;
		}
		fallthrough;
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

 error_finish_iomap:
	ret = dax_fault_return(error);
 finish_iomap:
	if (ops->iomap_end) {
		int copied = PAGE_SIZE;

		if (ret & VM_FAULT_ERROR)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PTE we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
	}
 unlock_entry:
	dax_unlock_entry(&xas, entry);
 out:
	trace_dax_pte_fault_done(inode, vmf, ret);
	return ret | major;
}

#ifdef CONFIG_FS_DAX_PMD
static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
		struct iomap *iomap, void **entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = mapping->host;
	pgtable_t pgtable = NULL;
	struct page *zero_page;
	spinlock_t *ptl;
	pmd_t pmd_entry;
	pfn_t pfn;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	pfn = page_to_pfn_t(zero_page);
	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
			DAX_PMD | DAX_ZERO_PAGE, false);

	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(vma->vm_mm);
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		spin_unlock(ptl);
		goto fallback;
	}

	if (pgtable) {
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		mm_inc_nr_ptes(vma->vm_mm);
	}
	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
	return VM_FAULT_NOPAGE;

fallback:
	if (pgtable)
		pte_free(vma->vm_mm, pgtable);
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
	return VM_FAULT_FALLBACK;
}

static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	unsigned int iomap_flags = (write ?
			IOMAP_WRITE : 0) | IOMAP_FAULT;
	struct inode *inode = mapping->host;
	vm_fault_t result = VM_FAULT_FALLBACK;
	struct iomap iomap = { .type = IOMAP_HOLE };
	struct iomap srcmap = { .type = IOMAP_HOLE };
	pgoff_t max_pgoff;
	void *entry;
	loff_t pos;
	int error;
	pfn_t pfn;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is
	 * supposed to hold locks serializing us with truncate / punch hole so
	 * this is a reliable test.
	 */
	max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);

	/*
	 * Make sure that the faulting address's PMD offset (color) matches
	 * the PMD offset from the start of the file. This is necessary so
	 * that a PMD range in the page table overlaps exactly with a PMD
	 * range in the page cache.
	 */
	if ((vmf->pgoff & PG_PMD_COLOUR) !=
	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
		goto fallback;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		goto fallback;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		goto fallback;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		goto fallback;

	if (xas.xa_index >= max_pgoff) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}

	/* If the PMD would extend beyond the file size */
	if ((xas.xa_index | PG_PMD_COLOUR) >= max_pgoff)
		goto fallback;

	/*
	 * grab_mapping_entry() will make sure we get an empty PMD entry,
	 * a zero PMD entry or a DAX PMD. If it can't (because a PTE
	 * entry is already in the array, for instance), it will return
	 * VM_FAULT_FALLBACK.
	 */
	entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
	if (xa_is_internal(entry)) {
		result = xa_to_internal(entry);
		goto fallback;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PTE fault that overlaps with
	 * the PMD we need to set up. If so just return and the fault will be
	 * retried.
	 */
	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
			!pmd_devmap(*vmf->pmd)) {
		result = 0;
		goto unlock_entry;
	}

	/*
	 * Note that we don't use iomap_apply here. We aren't doing I/O, only
	 * setting up a mapping, so really we're using iomap_begin() as a way
	 * to look up our filesystem block.
	 */
	pos = (loff_t)xas.xa_index << PAGE_SHIFT;
	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap,
			&srcmap);
	if (error)
		goto unlock_entry;

	if (iomap.offset + iomap.length < pos + PMD_SIZE)
		goto finish_iomap;

	sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
		if (error < 0)
			goto finish_iomap;

		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
						DAX_PMD, write && !sync);

		/*
		 * If we are doing synchronous page fault and inode needs fsync,
		 * we can insert PMD into page tables only after that happens.
		 * Skip insertion for now and return the pfn so that caller can
		 * insert it after fsync is done.
		 */
		if (sync) {
			if (WARN_ON_ONCE(!pfnp))
				goto finish_iomap;
			*pfnp = pfn;
			result = VM_FAULT_NEEDDSYNC;
			goto finish_iomap;
		}

		trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
		result = vmf_insert_pfn_pmd(vmf, pfn, write);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(write))
			break;
		result = dax_pmd_load_hole(&xas, vmf, &iomap, &entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

 finish_iomap:
	if (ops->iomap_end) {
		int copied = PMD_SIZE;

		if (result == VM_FAULT_FALLBACK)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PMD we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
				&iomap);
	}
 unlock_entry:
	dax_unlock_entry(&xas, entry);
 fallback:
	if (result == VM_FAULT_FALLBACK) {
		split_huge_pmd(vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
	return result;
}
#else
static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */

/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vmf: The description of the fault
 * @pe_size: Size of the page to fault in
 * @pfnp: PFN to insert for synchronous faults if fsync is required
 * @iomap_errp: Storage for detailed error code in case of error
 * @ops: Iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in
 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
 * has done all the necessary locking for page fault to proceed
 * successfully.
 */
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
{
	switch (pe_size) {
	case PE_SIZE_PTE:
		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
	case PE_SIZE_PMD:
		return dax_iomap_pmd_fault(vmf, pfnp, ops);
	default:
		return VM_FAULT_FALLBACK;
	}
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);

/*
 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
 * @vmf: The description of the fault
 * @pfn: PFN to insert
 * @order: Order of entry to insert.
 *
 * This function inserts a writeable PTE or PMD entry into the page tables
 * for an mmaped DAX file. It also marks the page cache entry as dirty.
 */
static vm_fault_t
dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
	void *entry;
	vm_fault_t ret;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas, order);
	/* Did we race with someone splitting entry or so?
	 */
	if (!entry || dax_is_conflict(entry) ||
	    (order == 0 && !dax_is_pte_entry(entry))) {
		put_unlocked_entry(&xas, entry);
		xas_unlock_irq(&xas);
		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
						      VM_FAULT_NOPAGE);
		return VM_FAULT_NOPAGE;
	}
	xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
	dax_lock_entry(&xas, entry);
	xas_unlock_irq(&xas);
	if (order == 0)
		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
#ifdef CONFIG_FS_DAX_PMD
	else if (order == PMD_ORDER)
		ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
#endif
	else
		ret = VM_FAULT_FALLBACK;
	dax_unlock_entry(&xas, entry);
	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
	return ret;
}

/**
 * dax_finish_sync_fault - finish synchronous page fault
 * @vmf: The description of the fault
 * @pe_size: Size of entry to be inserted
 * @pfn: PFN to insert
 *
 * This function ensures that the file range touched by the page fault is
 * stored persistently on the media and handles inserting of appropriate page
 * table entry.
 */
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size, pfn_t pfn)
{
	int err;
	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
	unsigned int order = pe_order(pe_size);
	size_t len = PAGE_SIZE << order;

	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
	if (err)
		return VM_FAULT_SIGBUS;
	return dax_insert_pfn_mkwrite(vmf, pfn, order);
}
EXPORT_SYMBOL_GPL(dax_finish_sync_fault);