// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include <linux/rmap.h>
#include <asm/pgalloc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

/* The 'colour' (ie low bits) within a PMD of a page offset. */
#define PG_PMD_COLOUR ((PMD_SIZE >> PAGE_SHIFT) - 1)
#define PG_PMD_NR (PMD_SIZE >> PAGE_SHIFT)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
        int i;

        for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
                init_waitqueue_head(wait_table + i);
        return 0;
}
fs_initcall(init_dax_wait_table);

/*
 * DAX pagecache entries use XArray value entries so they can't be mistaken
 * for pages. We use one bit for locking, one bit for the entry size (PMD)
 * and two more to tell us if the entry is a zero page or an empty entry that
 * is just used for locking. In total four special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
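 *
 * For example, a locked PMD entry for pfn 0x1000 would be stored as the
 * value entry xa_mk_value((0x1000 << DAX_SHIFT) | DAX_PMD | DAX_LOCKED)
 * (see dax_make_entry() and dax_lock_entry() below).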
 */
#define DAX_SHIFT	(4)
#define DAX_LOCKED	(1UL << 0)
#define DAX_PMD		(1UL << 1)
#define DAX_ZERO_PAGE	(1UL << 2)
#define DAX_EMPTY	(1UL << 3)

static unsigned long dax_to_pfn(void *entry)
{
        return xa_to_value(entry) >> DAX_SHIFT;
}

static struct folio *dax_to_folio(void *entry)
{
        return page_folio(pfn_to_page(dax_to_pfn(entry)));
}

static void *dax_make_entry(pfn_t pfn, unsigned long flags)
{
        return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
}

static bool dax_is_locked(void *entry)
{
        return xa_to_value(entry) & DAX_LOCKED;
}

static unsigned int dax_entry_order(void *entry)
{
        if (xa_to_value(entry) & DAX_PMD)
                return PMD_ORDER;
        return 0;
}

static unsigned long dax_is_pmd_entry(void *entry)
{
        return xa_to_value(entry) & DAX_PMD;
}

static bool dax_is_pte_entry(void *entry)
{
        return !(xa_to_value(entry) & DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
        return xa_to_value(entry) & DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
        return xa_to_value(entry) & DAX_EMPTY;
}

/*
 * true if the entry that was found is of a smaller order than the entry
 * we were looking for
 */
static bool dax_is_conflict(void *entry)
{
        return entry == XA_RETRY_ENTRY;
}

/*
 * DAX page cache entry locking
 */
struct exceptional_entry_key {
        struct xarray *xa;
        pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
        wait_queue_entry_t wait;
        struct exceptional_entry_key key;
};

/**
 * enum dax_wake_mode: waitqueue wakeup behaviour
 * @WAKE_ALL: wake all waiters in the waitqueue
 * @WAKE_NEXT: wake only the first waiter in the waitqueue
 */
enum dax_wake_mode {
        WAKE_ALL,
        WAKE_NEXT,
};

static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
                void *entry, struct exceptional_entry_key *key)
{
        unsigned long hash;
        unsigned long index = xas->xa_index;

        /*
         * If 'entry' is a PMD, align the 'index' that we use for the wait
         * queue to the start of that PMD. This ensures that all offsets in
         * the range covered by the PMD map to the same bit lock.
         */
        if (dax_is_pmd_entry(entry))
                index &= ~PG_PMD_COLOUR;
        key->xa = xas->xa;
        key->entry_start = index;

        hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
        return wait_table + hash;
}

static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
                unsigned int mode, int sync, void *keyp)
{
        struct exceptional_entry_key *key = keyp;
        struct wait_exceptional_entry_queue *ewait =
                container_of(wait, struct wait_exceptional_entry_queue, wait);

        if (key->xa != ewait->key.xa ||
            key->entry_start != ewait->key.entry_start)
                return 0;
        return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * @entry may no longer be the entry at the index in the mapping.
 * The important information it's conveying is whether the entry at
 * this index used to be a PMD entry.
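 * That is what determines which waitqueue (and wait key) any sleeping
 * tasks used, via dax_entry_waitqueue().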
 */
static void dax_wake_entry(struct xa_state *xas, void *entry,
                           enum dax_wake_mode mode)
{
        struct exceptional_entry_key key;
        wait_queue_head_t *wq;

        wq = dax_entry_waitqueue(xas, entry, &key);

        /*
         * Checking for locked entry and prepare_to_wait_exclusive() happens
         * under the i_pages lock, ditto for entry handling in our callers.
         * So at this point all tasks that could have seen our entry locked
         * must be in the waitqueue and the following check will see them.
         */
        if (waitqueue_active(wq))
                __wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key);
}

/*
 * Look up entry in page cache, wait for it to become unlocked if it
 * is a DAX entry and return it. The caller must subsequently call
 * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
 * if it did. The entry returned may have a larger order than @order.
 * If @order is larger than the order of the entry found in i_pages, this
 * function returns a dax_is_conflict entry.
 *
 * Must be called with the i_pages lock held.
 */
static void *get_next_unlocked_entry(struct xa_state *xas, unsigned int order)
{
        void *entry;
        struct wait_exceptional_entry_queue ewait;
        wait_queue_head_t *wq;

        init_wait(&ewait.wait);
        ewait.wait.func = wake_exceptional_entry_func;

        for (;;) {
                entry = xas_find_conflict(xas);
                if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
                        return entry;
                if (dax_entry_order(entry) < order)
                        return XA_RETRY_ENTRY;
                if (!dax_is_locked(entry))
                        return entry;

                wq = dax_entry_waitqueue(xas, entry, &ewait.key);
                prepare_to_wait_exclusive(wq, &ewait.wait,
                                          TASK_UNINTERRUPTIBLE);
                xas_unlock_irq(xas);
                xas_reset(xas);
                schedule();
                finish_wait(wq, &ewait.wait);
                xas_lock_irq(xas);
        }
}

/*
 * Wait for the given entry to become unlocked. Caller must hold the i_pages
 * lock and call either put_unlocked_entry() if it did not lock the entry or
 * dax_unlock_entry() if it did. Returns an unlocked entry if still present.
 */
static void *wait_entry_unlocked_exclusive(struct xa_state *xas, void *entry)
{
        struct wait_exceptional_entry_queue ewait;
        wait_queue_head_t *wq;

        init_wait(&ewait.wait);
        ewait.wait.func = wake_exceptional_entry_func;

        while (unlikely(dax_is_locked(entry))) {
                wq = dax_entry_waitqueue(xas, entry, &ewait.key);
                prepare_to_wait_exclusive(wq, &ewait.wait,
                                          TASK_UNINTERRUPTIBLE);
                xas_pause(xas);
                xas_unlock_irq(xas);
                schedule();
                finish_wait(wq, &ewait.wait);
                xas_lock_irq(xas);
                entry = xas_load(xas);
        }

        if (xa_is_internal(entry))
                return NULL;

        return entry;
}

/*
 * The only thing keeping the address space around is the i_pages lock
 * (it's cycled in clear_inode() after removing the entries from i_pages)
 * After we call xas_unlock_irq(), we cannot touch xas->xa.
 */
static void wait_entry_unlocked(struct xa_state *xas, void *entry)
{
        struct wait_exceptional_entry_queue ewait;
        wait_queue_head_t *wq;

        init_wait(&ewait.wait);
        ewait.wait.func = wake_exceptional_entry_func;

        wq = dax_entry_waitqueue(xas, entry, &ewait.key);
        /*
         * Unlike get_next_unlocked_entry() there is no guarantee that this
         * path ever successfully retrieves an unlocked entry before an
         * inode dies.
Perform a non-exclusive wait in case this path 292 * never successfully performs its own wake up. 293 */ 294 prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE); 295 xas_unlock_irq(xas); 296 schedule(); 297 finish_wait(wq, &ewait.wait); 298 } 299 300 static void put_unlocked_entry(struct xa_state *xas, void *entry, 301 enum dax_wake_mode mode) 302 { 303 if (entry && !dax_is_conflict(entry)) 304 dax_wake_entry(xas, entry, mode); 305 } 306 307 /* 308 * We used the xa_state to get the entry, but then we locked the entry and 309 * dropped the xa_lock, so we know the xa_state is stale and must be reset 310 * before use. 311 */ 312 static void dax_unlock_entry(struct xa_state *xas, void *entry) 313 { 314 void *old; 315 316 BUG_ON(dax_is_locked(entry)); 317 xas_reset(xas); 318 xas_lock_irq(xas); 319 old = xas_store(xas, entry); 320 xas_unlock_irq(xas); 321 BUG_ON(!dax_is_locked(old)); 322 dax_wake_entry(xas, entry, WAKE_NEXT); 323 } 324 325 /* 326 * Return: The entry stored at this location before it was locked. 327 */ 328 static void *dax_lock_entry(struct xa_state *xas, void *entry) 329 { 330 unsigned long v = xa_to_value(entry); 331 return xas_store(xas, xa_mk_value(v | DAX_LOCKED)); 332 } 333 334 static unsigned long dax_entry_size(void *entry) 335 { 336 if (dax_is_zero_entry(entry)) 337 return 0; 338 else if (dax_is_empty_entry(entry)) 339 return 0; 340 else if (dax_is_pmd_entry(entry)) 341 return PMD_SIZE; 342 else 343 return PAGE_SIZE; 344 } 345 346 /* 347 * A DAX folio is considered shared if it has no mapping set and ->share (which 348 * shares the ->index field) is non-zero. Note this may return false even if the 349 * page is shared between multiple files but has not yet actually been mapped 350 * into multiple address spaces. 351 */ 352 static inline bool dax_folio_is_shared(struct folio *folio) 353 { 354 return !folio->mapping && folio->share; 355 } 356 357 /* 358 * When it is called by dax_insert_entry(), the shared flag will indicate 359 * whether this entry is shared by multiple files. If the page has not 360 * previously been associated with any mappings the ->mapping and ->index 361 * fields will be set. If it has already been associated with a mapping 362 * the mapping will be cleared and the share count set. It's then up to 363 * reverse map users like memory_failure() to call back into the filesystem to 364 * recover ->mapping and ->index information. For example by implementing 365 * dax_holder_operations. 366 */ 367 static void dax_folio_make_shared(struct folio *folio) 368 { 369 /* 370 * folio is not currently shared so mark it as shared by clearing 371 * folio->mapping. 372 */ 373 folio->mapping = NULL; 374 375 /* 376 * folio has previously been mapped into one address space so set the 377 * share count. 378 */ 379 folio->share = 1; 380 } 381 382 static inline unsigned long dax_folio_put(struct folio *folio) 383 { 384 unsigned long ref; 385 int order, i; 386 387 if (!dax_folio_is_shared(folio)) 388 ref = 0; 389 else 390 ref = --folio->share; 391 392 if (ref) 393 return ref; 394 395 folio->mapping = NULL; 396 order = folio_order(folio); 397 if (!order) 398 return 0; 399 folio_reset_order(folio); 400 401 for (i = 0; i < (1UL << order); i++) { 402 struct dev_pagemap *pgmap = page_pgmap(&folio->page); 403 struct page *page = folio_page(folio, i); 404 struct folio *new_folio = (struct folio *)page; 405 406 ClearPageHead(page); 407 clear_compound_head(page); 408 409 new_folio->mapping = NULL; 410 /* 411 * Reset pgmap which was over-written by 412 * prep_compound_page(). 
413 */ 414 new_folio->pgmap = pgmap; 415 new_folio->share = 0; 416 WARN_ON_ONCE(folio_ref_count(new_folio)); 417 } 418 419 return ref; 420 } 421 422 static void dax_folio_init(void *entry) 423 { 424 struct folio *folio = dax_to_folio(entry); 425 int order = dax_entry_order(entry); 426 427 /* 428 * Folio should have been split back to order-0 pages in 429 * dax_folio_put() when they were removed from their 430 * final mapping. 431 */ 432 WARN_ON_ONCE(folio_order(folio)); 433 434 if (order > 0) { 435 prep_compound_page(&folio->page, order); 436 if (order > 1) 437 INIT_LIST_HEAD(&folio->_deferred_list); 438 WARN_ON_ONCE(folio_ref_count(folio)); 439 } 440 } 441 442 static void dax_associate_entry(void *entry, struct address_space *mapping, 443 struct vm_area_struct *vma, 444 unsigned long address, bool shared) 445 { 446 unsigned long size = dax_entry_size(entry), index; 447 struct folio *folio = dax_to_folio(entry); 448 449 if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) 450 return; 451 452 if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) 453 return; 454 455 index = linear_page_index(vma, address & ~(size - 1)); 456 if (shared && (folio->mapping || dax_folio_is_shared(folio))) { 457 if (folio->mapping) 458 dax_folio_make_shared(folio); 459 460 WARN_ON_ONCE(!folio->share); 461 WARN_ON_ONCE(dax_entry_order(entry) != folio_order(folio)); 462 folio->share++; 463 } else { 464 WARN_ON_ONCE(folio->mapping); 465 dax_folio_init(entry); 466 folio = dax_to_folio(entry); 467 folio->mapping = mapping; 468 folio->index = index; 469 } 470 } 471 472 static void dax_disassociate_entry(void *entry, struct address_space *mapping, 473 bool trunc) 474 { 475 struct folio *folio = dax_to_folio(entry); 476 477 if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) 478 return; 479 480 if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) 481 return; 482 483 dax_folio_put(folio); 484 } 485 486 static struct page *dax_busy_page(void *entry) 487 { 488 struct folio *folio = dax_to_folio(entry); 489 490 if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) 491 return NULL; 492 493 if (folio_ref_count(folio) - folio_mapcount(folio)) 494 return &folio->page; 495 else 496 return NULL; 497 } 498 499 /** 500 * dax_lock_folio - Lock the DAX entry corresponding to a folio 501 * @folio: The folio whose entry we want to lock 502 * 503 * Context: Process context. 504 * Return: A cookie to pass to dax_unlock_folio() or 0 if the entry could 505 * not be locked. 506 */ 507 dax_entry_t dax_lock_folio(struct folio *folio) 508 { 509 XA_STATE(xas, NULL, 0); 510 void *entry; 511 512 /* Ensure folio->mapping isn't freed while we look at it */ 513 rcu_read_lock(); 514 for (;;) { 515 struct address_space *mapping = READ_ONCE(folio->mapping); 516 517 entry = NULL; 518 if (!mapping || !dax_mapping(mapping)) 519 break; 520 521 /* 522 * In the device-dax case there's no need to lock, a 523 * struct dev_pagemap pin is sufficient to keep the 524 * inode alive, and we assume we have dev_pagemap pin 525 * otherwise we would not have a valid pfn_to_page() 526 * translation. 
527 */ 528 entry = (void *)~0UL; 529 if (S_ISCHR(mapping->host->i_mode)) 530 break; 531 532 xas.xa = &mapping->i_pages; 533 xas_lock_irq(&xas); 534 if (mapping != folio->mapping) { 535 xas_unlock_irq(&xas); 536 continue; 537 } 538 xas_set(&xas, folio->index); 539 entry = xas_load(&xas); 540 if (dax_is_locked(entry)) { 541 rcu_read_unlock(); 542 wait_entry_unlocked(&xas, entry); 543 rcu_read_lock(); 544 continue; 545 } 546 dax_lock_entry(&xas, entry); 547 xas_unlock_irq(&xas); 548 break; 549 } 550 rcu_read_unlock(); 551 return (dax_entry_t)entry; 552 } 553 554 void dax_unlock_folio(struct folio *folio, dax_entry_t cookie) 555 { 556 struct address_space *mapping = folio->mapping; 557 XA_STATE(xas, &mapping->i_pages, folio->index); 558 559 if (S_ISCHR(mapping->host->i_mode)) 560 return; 561 562 dax_unlock_entry(&xas, (void *)cookie); 563 } 564 565 /* 566 * dax_lock_mapping_entry - Lock the DAX entry corresponding to a mapping 567 * @mapping: the file's mapping whose entry we want to lock 568 * @index: the offset within this file 569 * @page: output the dax page corresponding to this dax entry 570 * 571 * Return: A cookie to pass to dax_unlock_mapping_entry() or 0 if the entry 572 * could not be locked. 573 */ 574 dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, pgoff_t index, 575 struct page **page) 576 { 577 XA_STATE(xas, NULL, 0); 578 void *entry; 579 580 rcu_read_lock(); 581 for (;;) { 582 entry = NULL; 583 if (!dax_mapping(mapping)) 584 break; 585 586 xas.xa = &mapping->i_pages; 587 xas_lock_irq(&xas); 588 xas_set(&xas, index); 589 entry = xas_load(&xas); 590 if (dax_is_locked(entry)) { 591 rcu_read_unlock(); 592 wait_entry_unlocked(&xas, entry); 593 rcu_read_lock(); 594 continue; 595 } 596 if (!entry || 597 dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) { 598 /* 599 * Because we are looking for entry from file's mapping 600 * and index, so the entry may not be inserted for now, 601 * or even a zero/empty entry. We don't think this is 602 * an error case. So, return a special value and do 603 * not output @page. 604 */ 605 entry = (void *)~0UL; 606 } else { 607 *page = pfn_to_page(dax_to_pfn(entry)); 608 dax_lock_entry(&xas, entry); 609 } 610 xas_unlock_irq(&xas); 611 break; 612 } 613 rcu_read_unlock(); 614 return (dax_entry_t)entry; 615 } 616 617 void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index, 618 dax_entry_t cookie) 619 { 620 XA_STATE(xas, &mapping->i_pages, index); 621 622 if (cookie == ~0UL) 623 return; 624 625 dax_unlock_entry(&xas, (void *)cookie); 626 } 627 628 /* 629 * Find page cache entry at given index. If it is a DAX entry, return it 630 * with the entry locked. If the page cache doesn't contain an entry at 631 * that index, add a locked empty entry. 632 * 633 * When requesting an entry with size DAX_PMD, grab_mapping_entry() will 634 * either return that locked entry or will return VM_FAULT_FALLBACK. 635 * This will happen if there are any PTE entries within the PMD range 636 * that we are requesting. 637 * 638 * We always favor PTE entries over PMD entries. There isn't a flow where we 639 * evict PTE entries in order to 'upgrade' them to a PMD entry. A PMD 640 * insertion will fail if it finds any PTE entries already in the tree, and a 641 * PTE insertion will cause an existing PMD entry to be unmapped and 642 * downgraded to PTE entries. This happens for both PMD zero pages as 643 * well as PMD empty entries. 644 * 645 * The exception to this downgrade path is for PMD entries that have 646 * real storage backing them. 
We will leave these real PMD entries in 647 * the tree, and PTE writes will simply dirty the entire PMD entry. 648 * 649 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For 650 * persistent memory the benefit is doubtful. We can add that later if we can 651 * show it helps. 652 * 653 * On error, this function does not return an ERR_PTR. Instead it returns 654 * a VM_FAULT code, encoded as an xarray internal entry. The ERR_PTR values 655 * overlap with xarray value entries. 656 */ 657 static void *grab_mapping_entry(struct xa_state *xas, 658 struct address_space *mapping, unsigned int order) 659 { 660 unsigned long index = xas->xa_index; 661 bool pmd_downgrade; /* splitting PMD entry into PTE entries? */ 662 void *entry; 663 664 retry: 665 pmd_downgrade = false; 666 xas_lock_irq(xas); 667 entry = get_next_unlocked_entry(xas, order); 668 669 if (entry) { 670 if (dax_is_conflict(entry)) 671 goto fallback; 672 if (!xa_is_value(entry)) { 673 xas_set_err(xas, -EIO); 674 goto out_unlock; 675 } 676 677 if (order == 0) { 678 if (dax_is_pmd_entry(entry) && 679 (dax_is_zero_entry(entry) || 680 dax_is_empty_entry(entry))) { 681 pmd_downgrade = true; 682 } 683 } 684 } 685 686 if (pmd_downgrade) { 687 /* 688 * Make sure 'entry' remains valid while we drop 689 * the i_pages lock. 690 */ 691 dax_lock_entry(xas, entry); 692 693 /* 694 * Besides huge zero pages the only other thing that gets 695 * downgraded are empty entries which don't need to be 696 * unmapped. 697 */ 698 if (dax_is_zero_entry(entry)) { 699 xas_unlock_irq(xas); 700 unmap_mapping_pages(mapping, 701 xas->xa_index & ~PG_PMD_COLOUR, 702 PG_PMD_NR, false); 703 xas_reset(xas); 704 xas_lock_irq(xas); 705 } 706 707 dax_disassociate_entry(entry, mapping, false); 708 xas_store(xas, NULL); /* undo the PMD join */ 709 dax_wake_entry(xas, entry, WAKE_ALL); 710 mapping->nrpages -= PG_PMD_NR; 711 entry = NULL; 712 xas_set(xas, index); 713 } 714 715 if (entry) { 716 dax_lock_entry(xas, entry); 717 } else { 718 unsigned long flags = DAX_EMPTY; 719 720 if (order > 0) 721 flags |= DAX_PMD; 722 entry = dax_make_entry(pfn_to_pfn_t(0), flags); 723 dax_lock_entry(xas, entry); 724 if (xas_error(xas)) 725 goto out_unlock; 726 mapping->nrpages += 1UL << order; 727 } 728 729 out_unlock: 730 xas_unlock_irq(xas); 731 if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM)) 732 goto retry; 733 if (xas->xa_node == XA_ERROR(-ENOMEM)) 734 return xa_mk_internal(VM_FAULT_OOM); 735 if (xas_error(xas)) 736 return xa_mk_internal(VM_FAULT_SIGBUS); 737 return entry; 738 fallback: 739 xas_unlock_irq(xas); 740 return xa_mk_internal(VM_FAULT_FALLBACK); 741 } 742 743 /** 744 * dax_layout_busy_page_range - find first pinned page in @mapping 745 * @mapping: address space to scan for a page with ref count > 1 746 * @start: Starting offset. Page containing 'start' is included. 747 * @end: End offset. Page containing 'end' is included. If 'end' is LLONG_MAX, 748 * pages from 'start' till the end of file are included. 749 * 750 * DAX requires ZONE_DEVICE mapped pages. These pages are never 751 * 'onlined' to the page allocator so they are considered idle when 752 * page->count == 1. A filesystem uses this interface to determine if 753 * any page in the mapping is busy, i.e. for DMA, or other 754 * get_user_pages() usages. 755 * 756 * It is expected that the filesystem is holding locks to block the 757 * establishment of new mappings in this address_space. I.e. 
it expects 758 * to be able to run unmap_mapping_range() and subsequently not race 759 * mapping_mapped() becoming true. 760 */ 761 struct page *dax_layout_busy_page_range(struct address_space *mapping, 762 loff_t start, loff_t end) 763 { 764 void *entry; 765 unsigned int scanned = 0; 766 struct page *page = NULL; 767 pgoff_t start_idx = start >> PAGE_SHIFT; 768 pgoff_t end_idx; 769 XA_STATE(xas, &mapping->i_pages, start_idx); 770 771 /* 772 * In the 'limited' case get_user_pages() for dax is disabled. 773 */ 774 if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) 775 return NULL; 776 777 if (!dax_mapping(mapping)) 778 return NULL; 779 780 /* If end == LLONG_MAX, all pages from start to till end of file */ 781 if (end == LLONG_MAX) 782 end_idx = ULONG_MAX; 783 else 784 end_idx = end >> PAGE_SHIFT; 785 /* 786 * If we race get_user_pages_fast() here either we'll see the 787 * elevated page count in the iteration and wait, or 788 * get_user_pages_fast() will see that the page it took a reference 789 * against is no longer mapped in the page tables and bail to the 790 * get_user_pages() slow path. The slow path is protected by 791 * pte_lock() and pmd_lock(). New references are not taken without 792 * holding those locks, and unmap_mapping_pages() will not zero the 793 * pte or pmd without holding the respective lock, so we are 794 * guaranteed to either see new references or prevent new 795 * references from being established. 796 */ 797 unmap_mapping_pages(mapping, start_idx, end_idx - start_idx + 1, 0); 798 799 xas_lock_irq(&xas); 800 xas_for_each(&xas, entry, end_idx) { 801 if (WARN_ON_ONCE(!xa_is_value(entry))) 802 continue; 803 entry = wait_entry_unlocked_exclusive(&xas, entry); 804 if (entry) 805 page = dax_busy_page(entry); 806 put_unlocked_entry(&xas, entry, WAKE_NEXT); 807 if (page) 808 break; 809 if (++scanned % XA_CHECK_SCHED) 810 continue; 811 812 xas_pause(&xas); 813 xas_unlock_irq(&xas); 814 cond_resched(); 815 xas_lock_irq(&xas); 816 } 817 xas_unlock_irq(&xas); 818 return page; 819 } 820 EXPORT_SYMBOL_GPL(dax_layout_busy_page_range); 821 822 struct page *dax_layout_busy_page(struct address_space *mapping) 823 { 824 return dax_layout_busy_page_range(mapping, 0, LLONG_MAX); 825 } 826 EXPORT_SYMBOL_GPL(dax_layout_busy_page); 827 828 static int __dax_invalidate_entry(struct address_space *mapping, 829 pgoff_t index, bool trunc) 830 { 831 XA_STATE(xas, &mapping->i_pages, index); 832 int ret = 0; 833 void *entry; 834 835 xas_lock_irq(&xas); 836 entry = get_next_unlocked_entry(&xas, 0); 837 if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) 838 goto out; 839 if (!trunc && 840 (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) || 841 xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE))) 842 goto out; 843 dax_disassociate_entry(entry, mapping, trunc); 844 xas_store(&xas, NULL); 845 mapping->nrpages -= 1UL << dax_entry_order(entry); 846 ret = 1; 847 out: 848 put_unlocked_entry(&xas, entry, WAKE_ALL); 849 xas_unlock_irq(&xas); 850 return ret; 851 } 852 853 static int __dax_clear_dirty_range(struct address_space *mapping, 854 pgoff_t start, pgoff_t end) 855 { 856 XA_STATE(xas, &mapping->i_pages, start); 857 unsigned int scanned = 0; 858 void *entry; 859 860 xas_lock_irq(&xas); 861 xas_for_each(&xas, entry, end) { 862 entry = wait_entry_unlocked_exclusive(&xas, entry); 863 if (!entry) 864 continue; 865 xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY); 866 xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE); 867 put_unlocked_entry(&xas, entry, WAKE_NEXT); 868 869 if (++scanned % XA_CHECK_SCHED) 870 continue; 871 872 xas_pause(&xas); 873 
xas_unlock_irq(&xas); 874 cond_resched(); 875 xas_lock_irq(&xas); 876 } 877 xas_unlock_irq(&xas); 878 879 return 0; 880 } 881 882 /* 883 * Delete DAX entry at @index from @mapping. Wait for it 884 * to be unlocked before deleting it. 885 */ 886 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index) 887 { 888 int ret = __dax_invalidate_entry(mapping, index, true); 889 890 /* 891 * This gets called from truncate / punch_hole path. As such, the caller 892 * must hold locks protecting against concurrent modifications of the 893 * page cache (usually fs-private i_mmap_sem for writing). Since the 894 * caller has seen a DAX entry for this index, we better find it 895 * at that index as well... 896 */ 897 WARN_ON_ONCE(!ret); 898 return ret; 899 } 900 901 void dax_delete_mapping_range(struct address_space *mapping, 902 loff_t start, loff_t end) 903 { 904 void *entry; 905 pgoff_t start_idx = start >> PAGE_SHIFT; 906 pgoff_t end_idx; 907 XA_STATE(xas, &mapping->i_pages, start_idx); 908 909 /* If end == LLONG_MAX, all pages from start to till end of file */ 910 if (end == LLONG_MAX) 911 end_idx = ULONG_MAX; 912 else 913 end_idx = end >> PAGE_SHIFT; 914 915 xas_lock_irq(&xas); 916 xas_for_each(&xas, entry, end_idx) { 917 if (!xa_is_value(entry)) 918 continue; 919 entry = wait_entry_unlocked_exclusive(&xas, entry); 920 if (!entry) 921 continue; 922 dax_disassociate_entry(entry, mapping, true); 923 xas_store(&xas, NULL); 924 mapping->nrpages -= 1UL << dax_entry_order(entry); 925 put_unlocked_entry(&xas, entry, WAKE_ALL); 926 } 927 xas_unlock_irq(&xas); 928 } 929 EXPORT_SYMBOL_GPL(dax_delete_mapping_range); 930 931 static int wait_page_idle(struct page *page, 932 void (cb)(struct inode *), 933 struct inode *inode) 934 { 935 return ___wait_var_event(page, dax_page_is_idle(page), 936 TASK_INTERRUPTIBLE, 0, 0, cb(inode)); 937 } 938 939 static void wait_page_idle_uninterruptible(struct page *page, 940 struct inode *inode) 941 { 942 ___wait_var_event(page, dax_page_is_idle(page), 943 TASK_UNINTERRUPTIBLE, 0, 0, schedule()); 944 } 945 946 /* 947 * Unmaps the inode and waits for any DMA to complete prior to deleting the 948 * DAX mapping entries for the range. 949 * 950 * For NOWAIT behavior, pass @cb as NULL to early-exit on first found 951 * busy page 952 */ 953 int dax_break_layout(struct inode *inode, loff_t start, loff_t end, 954 void (cb)(struct inode *)) 955 { 956 struct page *page; 957 int error = 0; 958 959 if (!dax_mapping(inode->i_mapping)) 960 return 0; 961 962 do { 963 page = dax_layout_busy_page_range(inode->i_mapping, start, end); 964 if (!page) 965 break; 966 if (!cb) { 967 error = -ERESTARTSYS; 968 break; 969 } 970 971 error = wait_page_idle(page, cb, inode); 972 } while (error == 0); 973 974 if (!page) 975 dax_delete_mapping_range(inode->i_mapping, start, end); 976 977 return error; 978 } 979 EXPORT_SYMBOL_GPL(dax_break_layout); 980 981 void dax_break_layout_final(struct inode *inode) 982 { 983 struct page *page; 984 985 if (!dax_mapping(inode->i_mapping)) 986 return; 987 988 do { 989 page = dax_layout_busy_page_range(inode->i_mapping, 0, 990 LLONG_MAX); 991 if (!page) 992 break; 993 994 wait_page_idle_uninterruptible(page, inode); 995 } while (true); 996 997 if (!page) 998 dax_delete_mapping_range(inode->i_mapping, 0, LLONG_MAX); 999 } 1000 EXPORT_SYMBOL_GPL(dax_break_layout_final); 1001 1002 /* 1003 * Invalidate DAX entry if it is clean. 
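 * "Clean" here means that neither the DIRTY nor the TOWRITE tag is set on
 * the entry; dirty entries are left in place for writeback.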
1004 */ 1005 int dax_invalidate_mapping_entry_sync(struct address_space *mapping, 1006 pgoff_t index) 1007 { 1008 return __dax_invalidate_entry(mapping, index, false); 1009 } 1010 1011 static pgoff_t dax_iomap_pgoff(const struct iomap *iomap, loff_t pos) 1012 { 1013 return PHYS_PFN(iomap->addr + (pos & PAGE_MASK) - iomap->offset); 1014 } 1015 1016 static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter) 1017 { 1018 pgoff_t pgoff = dax_iomap_pgoff(&iter->iomap, iter->pos); 1019 void *vto, *kaddr; 1020 long rc; 1021 int id; 1022 1023 id = dax_read_lock(); 1024 rc = dax_direct_access(iter->iomap.dax_dev, pgoff, 1, DAX_ACCESS, 1025 &kaddr, NULL); 1026 if (rc < 0) { 1027 dax_read_unlock(id); 1028 return rc; 1029 } 1030 vto = kmap_atomic(vmf->cow_page); 1031 copy_user_page(vto, kaddr, vmf->address, vmf->cow_page); 1032 kunmap_atomic(vto); 1033 dax_read_unlock(id); 1034 return 0; 1035 } 1036 1037 /* 1038 * MAP_SYNC on a dax mapping guarantees dirty metadata is 1039 * flushed on write-faults (non-cow), but not read-faults. 1040 */ 1041 static bool dax_fault_is_synchronous(const struct iomap_iter *iter, 1042 struct vm_area_struct *vma) 1043 { 1044 return (iter->flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC) && 1045 (iter->iomap.flags & IOMAP_F_DIRTY); 1046 } 1047 1048 /* 1049 * By this point grab_mapping_entry() has ensured that we have a locked entry 1050 * of the appropriate size so we don't have to worry about downgrading PMDs to 1051 * PTEs. If we happen to be trying to insert a PTE and there is a PMD 1052 * already in the tree, we will skip the insertion and just dirty the PMD as 1053 * appropriate. 1054 */ 1055 static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf, 1056 const struct iomap_iter *iter, void *entry, pfn_t pfn, 1057 unsigned long flags) 1058 { 1059 struct address_space *mapping = vmf->vma->vm_file->f_mapping; 1060 void *new_entry = dax_make_entry(pfn, flags); 1061 bool write = iter->flags & IOMAP_WRITE; 1062 bool dirty = write && !dax_fault_is_synchronous(iter, vmf->vma); 1063 bool shared = iter->iomap.flags & IOMAP_F_SHARED; 1064 1065 if (dirty) 1066 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); 1067 1068 if (shared || (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE))) { 1069 unsigned long index = xas->xa_index; 1070 /* we are replacing a zero page with block mapping */ 1071 if (dax_is_pmd_entry(entry)) 1072 unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR, 1073 PG_PMD_NR, false); 1074 else /* pte entry */ 1075 unmap_mapping_pages(mapping, index, 1, false); 1076 } 1077 1078 xas_reset(xas); 1079 xas_lock_irq(xas); 1080 if (shared || dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) { 1081 void *old; 1082 1083 dax_disassociate_entry(entry, mapping, false); 1084 dax_associate_entry(new_entry, mapping, vmf->vma, 1085 vmf->address, shared); 1086 1087 /* 1088 * Only swap our new entry into the page cache if the current 1089 * entry is a zero page or an empty entry. If a normal PTE or 1090 * PMD entry is already in the cache, we leave it alone. This 1091 * means that if we are trying to insert a PTE and the 1092 * existing entry is a PMD, we will just leave the PMD in the 1093 * tree and dirty it if necessary. 
1094 */ 1095 old = dax_lock_entry(xas, new_entry); 1096 WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) | 1097 DAX_LOCKED)); 1098 entry = new_entry; 1099 } else { 1100 xas_load(xas); /* Walk the xa_state */ 1101 } 1102 1103 if (dirty) 1104 xas_set_mark(xas, PAGECACHE_TAG_DIRTY); 1105 1106 if (write && shared) 1107 xas_set_mark(xas, PAGECACHE_TAG_TOWRITE); 1108 1109 xas_unlock_irq(xas); 1110 return entry; 1111 } 1112 1113 static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev, 1114 struct address_space *mapping, void *entry) 1115 { 1116 unsigned long pfn, index, count, end; 1117 long ret = 0; 1118 struct vm_area_struct *vma; 1119 1120 /* 1121 * A page got tagged dirty in DAX mapping? Something is seriously 1122 * wrong. 1123 */ 1124 if (WARN_ON(!xa_is_value(entry))) 1125 return -EIO; 1126 1127 if (unlikely(dax_is_locked(entry))) { 1128 void *old_entry = entry; 1129 1130 entry = get_next_unlocked_entry(xas, 0); 1131 1132 /* Entry got punched out / reallocated? */ 1133 if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) 1134 goto put_unlocked; 1135 /* 1136 * Entry got reallocated elsewhere? No need to writeback. 1137 * We have to compare pfns as we must not bail out due to 1138 * difference in lockbit or entry type. 1139 */ 1140 if (dax_to_pfn(old_entry) != dax_to_pfn(entry)) 1141 goto put_unlocked; 1142 if (WARN_ON_ONCE(dax_is_empty_entry(entry) || 1143 dax_is_zero_entry(entry))) { 1144 ret = -EIO; 1145 goto put_unlocked; 1146 } 1147 1148 /* Another fsync thread may have already done this entry */ 1149 if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE)) 1150 goto put_unlocked; 1151 } 1152 1153 /* Lock the entry to serialize with page faults */ 1154 dax_lock_entry(xas, entry); 1155 1156 /* 1157 * We can clear the tag now but we have to be careful so that concurrent 1158 * dax_writeback_one() calls for the same index cannot finish before we 1159 * actually flush the caches. This is achieved as the calls will look 1160 * at the entry only under the i_pages lock and once they do that 1161 * they will see the entry locked and wait for it to unlock. 1162 */ 1163 xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE); 1164 xas_unlock_irq(xas); 1165 1166 /* 1167 * If dax_writeback_mapping_range() was given a wbc->range_start 1168 * in the middle of a PMD, the 'index' we use needs to be 1169 * aligned to the start of the PMD. 1170 * This allows us to flush for PMD_SIZE and not have to worry about 1171 * partial PMD writebacks. 1172 */ 1173 pfn = dax_to_pfn(entry); 1174 count = 1UL << dax_entry_order(entry); 1175 index = xas->xa_index & ~(count - 1); 1176 end = index + count - 1; 1177 1178 /* Walk all mappings of a given index of a file and writeprotect them */ 1179 i_mmap_lock_read(mapping); 1180 vma_interval_tree_foreach(vma, &mapping->i_mmap, index, end) { 1181 pfn_mkclean_range(pfn, count, index, vma); 1182 cond_resched(); 1183 } 1184 i_mmap_unlock_read(mapping); 1185 1186 dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE); 1187 /* 1188 * After we have flushed the cache, we can clear the dirty tag. There 1189 * cannot be new dirty data in the pfn after the flush has completed as 1190 * the pfn mappings are writeprotected and fault waits for mapping 1191 * entry lock. 
1192 */ 1193 xas_reset(xas); 1194 xas_lock_irq(xas); 1195 xas_store(xas, entry); 1196 xas_clear_mark(xas, PAGECACHE_TAG_DIRTY); 1197 dax_wake_entry(xas, entry, WAKE_NEXT); 1198 1199 trace_dax_writeback_one(mapping->host, index, count); 1200 return ret; 1201 1202 put_unlocked: 1203 put_unlocked_entry(xas, entry, WAKE_NEXT); 1204 return ret; 1205 } 1206 1207 /* 1208 * Flush the mapping to the persistent domain within the byte range of [start, 1209 * end]. This is required by data integrity operations to ensure file data is 1210 * on persistent storage prior to completion of the operation. 1211 */ 1212 int dax_writeback_mapping_range(struct address_space *mapping, 1213 struct dax_device *dax_dev, struct writeback_control *wbc) 1214 { 1215 XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT); 1216 struct inode *inode = mapping->host; 1217 pgoff_t end_index = wbc->range_end >> PAGE_SHIFT; 1218 void *entry; 1219 int ret = 0; 1220 unsigned int scanned = 0; 1221 1222 if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT)) 1223 return -EIO; 1224 1225 if (mapping_empty(mapping) || wbc->sync_mode != WB_SYNC_ALL) 1226 return 0; 1227 1228 trace_dax_writeback_range(inode, xas.xa_index, end_index); 1229 1230 tag_pages_for_writeback(mapping, xas.xa_index, end_index); 1231 1232 xas_lock_irq(&xas); 1233 xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) { 1234 ret = dax_writeback_one(&xas, dax_dev, mapping, entry); 1235 if (ret < 0) { 1236 mapping_set_error(mapping, ret); 1237 break; 1238 } 1239 if (++scanned % XA_CHECK_SCHED) 1240 continue; 1241 1242 xas_pause(&xas); 1243 xas_unlock_irq(&xas); 1244 cond_resched(); 1245 xas_lock_irq(&xas); 1246 } 1247 xas_unlock_irq(&xas); 1248 trace_dax_writeback_range_done(inode, xas.xa_index, end_index); 1249 return ret; 1250 } 1251 EXPORT_SYMBOL_GPL(dax_writeback_mapping_range); 1252 1253 static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos, 1254 size_t size, void **kaddr, pfn_t *pfnp) 1255 { 1256 pgoff_t pgoff = dax_iomap_pgoff(iomap, pos); 1257 int id, rc = 0; 1258 long length; 1259 1260 id = dax_read_lock(); 1261 length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size), 1262 DAX_ACCESS, kaddr, pfnp); 1263 if (length < 0) { 1264 rc = length; 1265 goto out; 1266 } 1267 if (!pfnp) 1268 goto out_check_addr; 1269 rc = -EINVAL; 1270 if (PFN_PHYS(length) < size) 1271 goto out; 1272 if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1)) 1273 goto out; 1274 1275 rc = 0; 1276 1277 out_check_addr: 1278 if (!kaddr) 1279 goto out; 1280 if (!*kaddr) 1281 rc = -EFAULT; 1282 out: 1283 dax_read_unlock(id); 1284 return rc; 1285 } 1286 1287 /** 1288 * dax_iomap_copy_around - Prepare for an unaligned write to a shared/cow page 1289 * by copying the data before and after the range to be written. 1290 * @pos: address to do copy from. 1291 * @length: size of copy operation. 1292 * @align_size: aligned w.r.t align_size (either PMD_SIZE or PAGE_SIZE) 1293 * @srcmap: iomap srcmap 1294 * @daddr: destination address to copy to. 1295 * 1296 * This can be called from two places. Either during DAX write fault (page 1297 * aligned), to copy the length size data to daddr. Or, while doing normal DAX 1298 * write operation, dax_iomap_iter() might call this to do the copy of either 1299 * start or end unaligned address. In the latter case the rest of the copy of 1300 * aligned ranges is taken care by dax_iomap_iter() itself. 1301 * If the srcmap contains invalid data, such as HOLE and UNWRITTEN, zero the 1302 * area to make sure no old data remains. 
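 *
 * For example, assuming a 4096-byte @align_size, a 1000-byte write at
 * offset 100 within the page copies (or zeroes) bytes 0-99 and bytes
 * 1100-4095 around the range being written.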
1303 */ 1304 static int dax_iomap_copy_around(loff_t pos, uint64_t length, size_t align_size, 1305 const struct iomap *srcmap, void *daddr) 1306 { 1307 loff_t head_off = pos & (align_size - 1); 1308 size_t size = ALIGN(head_off + length, align_size); 1309 loff_t end = pos + length; 1310 loff_t pg_end = round_up(end, align_size); 1311 /* copy_all is usually in page fault case */ 1312 bool copy_all = head_off == 0 && end == pg_end; 1313 /* zero the edges if srcmap is a HOLE or IOMAP_UNWRITTEN */ 1314 bool zero_edge = srcmap->flags & IOMAP_F_SHARED || 1315 srcmap->type == IOMAP_UNWRITTEN; 1316 void *saddr = NULL; 1317 int ret = 0; 1318 1319 if (!zero_edge) { 1320 ret = dax_iomap_direct_access(srcmap, pos, size, &saddr, NULL); 1321 if (ret) 1322 return dax_mem2blk_err(ret); 1323 } 1324 1325 if (copy_all) { 1326 if (zero_edge) 1327 memset(daddr, 0, size); 1328 else 1329 ret = copy_mc_to_kernel(daddr, saddr, length); 1330 goto out; 1331 } 1332 1333 /* Copy the head part of the range */ 1334 if (head_off) { 1335 if (zero_edge) 1336 memset(daddr, 0, head_off); 1337 else { 1338 ret = copy_mc_to_kernel(daddr, saddr, head_off); 1339 if (ret) 1340 return -EIO; 1341 } 1342 } 1343 1344 /* Copy the tail part of the range */ 1345 if (end < pg_end) { 1346 loff_t tail_off = head_off + length; 1347 loff_t tail_len = pg_end - end; 1348 1349 if (zero_edge) 1350 memset(daddr + tail_off, 0, tail_len); 1351 else { 1352 ret = copy_mc_to_kernel(daddr + tail_off, 1353 saddr + tail_off, tail_len); 1354 if (ret) 1355 return -EIO; 1356 } 1357 } 1358 out: 1359 if (zero_edge) 1360 dax_flush(srcmap->dax_dev, daddr, size); 1361 return ret ? -EIO : 0; 1362 } 1363 1364 /* 1365 * The user has performed a load from a hole in the file. Allocating a new 1366 * page in the file would cause excessive storage usage for workloads with 1367 * sparse files. Instead we insert a read-only mapping of the 4k zero page. 1368 * If this page is ever written to we will re-fault and change the mapping to 1369 * point to real DAX storage instead. 
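 * (dax_insert_entry() unmaps the zero page when the entry is later
 * replaced by a real block mapping.)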
1370 */ 1371 static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf, 1372 const struct iomap_iter *iter, void **entry) 1373 { 1374 struct inode *inode = iter->inode; 1375 unsigned long vaddr = vmf->address; 1376 pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr)); 1377 vm_fault_t ret; 1378 1379 *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE); 1380 1381 ret = vmf_insert_page_mkwrite(vmf, pfn_t_to_page(pfn), false); 1382 trace_dax_load_hole(inode, vmf, ret); 1383 return ret; 1384 } 1385 1386 #ifdef CONFIG_FS_DAX_PMD 1387 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, 1388 const struct iomap_iter *iter, void **entry) 1389 { 1390 struct address_space *mapping = vmf->vma->vm_file->f_mapping; 1391 unsigned long pmd_addr = vmf->address & PMD_MASK; 1392 struct vm_area_struct *vma = vmf->vma; 1393 struct inode *inode = mapping->host; 1394 pgtable_t pgtable = NULL; 1395 struct folio *zero_folio; 1396 spinlock_t *ptl; 1397 pmd_t pmd_entry; 1398 pfn_t pfn; 1399 1400 zero_folio = mm_get_huge_zero_folio(vmf->vma->vm_mm); 1401 1402 if (unlikely(!zero_folio)) 1403 goto fallback; 1404 1405 pfn = page_to_pfn_t(&zero_folio->page); 1406 *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, 1407 DAX_PMD | DAX_ZERO_PAGE); 1408 1409 if (arch_needs_pgtable_deposit()) { 1410 pgtable = pte_alloc_one(vma->vm_mm); 1411 if (!pgtable) 1412 return VM_FAULT_OOM; 1413 } 1414 1415 ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); 1416 if (!pmd_none(*(vmf->pmd))) { 1417 spin_unlock(ptl); 1418 goto fallback; 1419 } 1420 1421 if (pgtable) { 1422 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); 1423 mm_inc_nr_ptes(vma->vm_mm); 1424 } 1425 pmd_entry = mk_pmd(&zero_folio->page, vmf->vma->vm_page_prot); 1426 pmd_entry = pmd_mkhuge(pmd_entry); 1427 set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry); 1428 spin_unlock(ptl); 1429 trace_dax_pmd_load_hole(inode, vmf, zero_folio, *entry); 1430 return VM_FAULT_NOPAGE; 1431 1432 fallback: 1433 if (pgtable) 1434 pte_free(vma->vm_mm, pgtable); 1435 trace_dax_pmd_load_hole_fallback(inode, vmf, zero_folio, *entry); 1436 return VM_FAULT_FALLBACK; 1437 } 1438 #else 1439 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, 1440 const struct iomap_iter *iter, void **entry) 1441 { 1442 return VM_FAULT_FALLBACK; 1443 } 1444 #endif /* CONFIG_FS_DAX_PMD */ 1445 1446 static int dax_unshare_iter(struct iomap_iter *iter) 1447 { 1448 struct iomap *iomap = &iter->iomap; 1449 const struct iomap *srcmap = iomap_iter_srcmap(iter); 1450 loff_t copy_pos = iter->pos; 1451 u64 copy_len = iomap_length(iter); 1452 u32 mod; 1453 int id = 0; 1454 s64 ret; 1455 void *daddr = NULL, *saddr = NULL; 1456 1457 if (!iomap_want_unshare_iter(iter)) 1458 return iomap_iter_advance_full(iter); 1459 1460 /* 1461 * Extend the file range to be aligned to fsblock/pagesize, because 1462 * we need to copy entire blocks, not just the byte range specified. 1463 * Invalidate the mapping because we're about to CoW. 
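 * For example, assuming a 4096-byte page size, an unshare of bytes
 * 100-1099 is widened to cover bytes 0-4095 before the copy is done.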
1464 */ 1465 mod = offset_in_page(copy_pos); 1466 if (mod) { 1467 copy_len += mod; 1468 copy_pos -= mod; 1469 } 1470 1471 mod = offset_in_page(copy_pos + copy_len); 1472 if (mod) 1473 copy_len += PAGE_SIZE - mod; 1474 1475 invalidate_inode_pages2_range(iter->inode->i_mapping, 1476 copy_pos >> PAGE_SHIFT, 1477 (copy_pos + copy_len - 1) >> PAGE_SHIFT); 1478 1479 id = dax_read_lock(); 1480 ret = dax_iomap_direct_access(iomap, copy_pos, copy_len, &daddr, NULL); 1481 if (ret < 0) 1482 goto out_unlock; 1483 1484 ret = dax_iomap_direct_access(srcmap, copy_pos, copy_len, &saddr, NULL); 1485 if (ret < 0) 1486 goto out_unlock; 1487 1488 if (copy_mc_to_kernel(daddr, saddr, copy_len) != 0) 1489 ret = -EIO; 1490 1491 out_unlock: 1492 dax_read_unlock(id); 1493 if (ret < 0) 1494 return dax_mem2blk_err(ret); 1495 return iomap_iter_advance_full(iter); 1496 } 1497 1498 int dax_file_unshare(struct inode *inode, loff_t pos, loff_t len, 1499 const struct iomap_ops *ops) 1500 { 1501 struct iomap_iter iter = { 1502 .inode = inode, 1503 .pos = pos, 1504 .flags = IOMAP_WRITE | IOMAP_UNSHARE | IOMAP_DAX, 1505 }; 1506 loff_t size = i_size_read(inode); 1507 int ret; 1508 1509 if (pos < 0 || pos >= size) 1510 return 0; 1511 1512 iter.len = min(len, size - pos); 1513 while ((ret = iomap_iter(&iter, ops)) > 0) 1514 iter.status = dax_unshare_iter(&iter); 1515 return ret; 1516 } 1517 EXPORT_SYMBOL_GPL(dax_file_unshare); 1518 1519 static int dax_memzero(struct iomap_iter *iter, loff_t pos, size_t size) 1520 { 1521 const struct iomap *iomap = &iter->iomap; 1522 const struct iomap *srcmap = iomap_iter_srcmap(iter); 1523 unsigned offset = offset_in_page(pos); 1524 pgoff_t pgoff = dax_iomap_pgoff(iomap, pos); 1525 void *kaddr; 1526 long ret; 1527 1528 ret = dax_direct_access(iomap->dax_dev, pgoff, 1, DAX_ACCESS, &kaddr, 1529 NULL); 1530 if (ret < 0) 1531 return dax_mem2blk_err(ret); 1532 1533 memset(kaddr + offset, 0, size); 1534 if (iomap->flags & IOMAP_F_SHARED) 1535 ret = dax_iomap_copy_around(pos, size, PAGE_SIZE, srcmap, 1536 kaddr); 1537 else 1538 dax_flush(iomap->dax_dev, kaddr + offset, size); 1539 return ret; 1540 } 1541 1542 static int dax_zero_iter(struct iomap_iter *iter, bool *did_zero) 1543 { 1544 const struct iomap *iomap = &iter->iomap; 1545 const struct iomap *srcmap = iomap_iter_srcmap(iter); 1546 u64 length = iomap_length(iter); 1547 int ret; 1548 1549 /* already zeroed? we're done. */ 1550 if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN) 1551 return iomap_iter_advance(iter, &length); 1552 1553 /* 1554 * invalidate the pages whose sharing state is to be changed 1555 * because of CoW. 
1556 */ 1557 if (iomap->flags & IOMAP_F_SHARED) 1558 invalidate_inode_pages2_range(iter->inode->i_mapping, 1559 iter->pos >> PAGE_SHIFT, 1560 (iter->pos + length - 1) >> PAGE_SHIFT); 1561 1562 do { 1563 loff_t pos = iter->pos; 1564 unsigned offset = offset_in_page(pos); 1565 pgoff_t pgoff = dax_iomap_pgoff(iomap, pos); 1566 int id; 1567 1568 length = min_t(u64, PAGE_SIZE - offset, length); 1569 1570 id = dax_read_lock(); 1571 if (IS_ALIGNED(pos, PAGE_SIZE) && length == PAGE_SIZE) 1572 ret = dax_zero_page_range(iomap->dax_dev, pgoff, 1); 1573 else 1574 ret = dax_memzero(iter, pos, length); 1575 dax_read_unlock(id); 1576 1577 if (ret < 0) 1578 return ret; 1579 1580 ret = iomap_iter_advance(iter, &length); 1581 if (ret) 1582 return ret; 1583 } while (length > 0); 1584 1585 if (did_zero) 1586 *did_zero = true; 1587 return ret; 1588 } 1589 1590 int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, 1591 const struct iomap_ops *ops) 1592 { 1593 struct iomap_iter iter = { 1594 .inode = inode, 1595 .pos = pos, 1596 .len = len, 1597 .flags = IOMAP_DAX | IOMAP_ZERO, 1598 }; 1599 int ret; 1600 1601 while ((ret = iomap_iter(&iter, ops)) > 0) 1602 iter.status = dax_zero_iter(&iter, did_zero); 1603 return ret; 1604 } 1605 EXPORT_SYMBOL_GPL(dax_zero_range); 1606 1607 int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, 1608 const struct iomap_ops *ops) 1609 { 1610 unsigned int blocksize = i_blocksize(inode); 1611 unsigned int off = pos & (blocksize - 1); 1612 1613 /* Block boundary? Nothing to do */ 1614 if (!off) 1615 return 0; 1616 return dax_zero_range(inode, pos, blocksize - off, did_zero, ops); 1617 } 1618 EXPORT_SYMBOL_GPL(dax_truncate_page); 1619 1620 static int dax_iomap_iter(struct iomap_iter *iomi, struct iov_iter *iter) 1621 { 1622 const struct iomap *iomap = &iomi->iomap; 1623 const struct iomap *srcmap = iomap_iter_srcmap(iomi); 1624 loff_t length = iomap_length(iomi); 1625 loff_t pos = iomi->pos; 1626 struct dax_device *dax_dev = iomap->dax_dev; 1627 loff_t end = pos + length, done = 0; 1628 bool write = iov_iter_rw(iter) == WRITE; 1629 bool cow = write && iomap->flags & IOMAP_F_SHARED; 1630 ssize_t ret = 0; 1631 size_t xfer; 1632 int id; 1633 1634 if (!write) { 1635 end = min(end, i_size_read(iomi->inode)); 1636 if (pos >= end) 1637 return 0; 1638 1639 if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN) { 1640 done = iov_iter_zero(min(length, end - pos), iter); 1641 return iomap_iter_advance(iomi, &done); 1642 } 1643 } 1644 1645 /* 1646 * In DAX mode, enforce either pure overwrites of written extents, or 1647 * writes to unwritten extents as part of a copy-on-write operation. 1648 */ 1649 if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED && 1650 !(iomap->flags & IOMAP_F_SHARED))) 1651 return -EIO; 1652 1653 /* 1654 * Write can allocate block for an area which has a hole page mapped 1655 * into page tables. We have to tear down these mappings so that data 1656 * written by write(2) is visible in mmap. 1657 */ 1658 if (iomap->flags & IOMAP_F_NEW || cow) { 1659 /* 1660 * Filesystem allows CoW on non-shared extents. The src extents 1661 * may have been mmapped with dirty mark before. To be able to 1662 * invalidate its dax entries, we need to clear the dirty mark 1663 * in advance. 
1664 */ 1665 if (cow) 1666 __dax_clear_dirty_range(iomi->inode->i_mapping, 1667 pos >> PAGE_SHIFT, 1668 (end - 1) >> PAGE_SHIFT); 1669 invalidate_inode_pages2_range(iomi->inode->i_mapping, 1670 pos >> PAGE_SHIFT, 1671 (end - 1) >> PAGE_SHIFT); 1672 } 1673 1674 id = dax_read_lock(); 1675 while ((pos = iomi->pos) < end) { 1676 unsigned offset = pos & (PAGE_SIZE - 1); 1677 const size_t size = ALIGN(length + offset, PAGE_SIZE); 1678 pgoff_t pgoff = dax_iomap_pgoff(iomap, pos); 1679 ssize_t map_len; 1680 bool recovery = false; 1681 void *kaddr; 1682 1683 if (fatal_signal_pending(current)) { 1684 ret = -EINTR; 1685 break; 1686 } 1687 1688 map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), 1689 DAX_ACCESS, &kaddr, NULL); 1690 if (map_len == -EHWPOISON && iov_iter_rw(iter) == WRITE) { 1691 map_len = dax_direct_access(dax_dev, pgoff, 1692 PHYS_PFN(size), DAX_RECOVERY_WRITE, 1693 &kaddr, NULL); 1694 if (map_len > 0) 1695 recovery = true; 1696 } 1697 if (map_len < 0) { 1698 ret = dax_mem2blk_err(map_len); 1699 break; 1700 } 1701 1702 if (cow) { 1703 ret = dax_iomap_copy_around(pos, length, PAGE_SIZE, 1704 srcmap, kaddr); 1705 if (ret) 1706 break; 1707 } 1708 1709 map_len = PFN_PHYS(map_len); 1710 kaddr += offset; 1711 map_len -= offset; 1712 if (map_len > end - pos) 1713 map_len = end - pos; 1714 1715 if (recovery) 1716 xfer = dax_recovery_write(dax_dev, pgoff, kaddr, 1717 map_len, iter); 1718 else if (write) 1719 xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr, 1720 map_len, iter); 1721 else 1722 xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr, 1723 map_len, iter); 1724 1725 length = xfer; 1726 ret = iomap_iter_advance(iomi, &length); 1727 if (!ret && xfer == 0) 1728 ret = -EFAULT; 1729 if (xfer < map_len) 1730 break; 1731 } 1732 dax_read_unlock(id); 1733 1734 return ret; 1735 } 1736 1737 /** 1738 * dax_iomap_rw - Perform I/O to a DAX file 1739 * @iocb: The control block for this I/O 1740 * @iter: The addresses to do I/O from or to 1741 * @ops: iomap ops passed from the file system 1742 * 1743 * This function performs read and write operations to directly mapped 1744 * persistent memory. The callers needs to take care of read/write exclusion 1745 * and evicting any page cache pages in the region under I/O. 1746 */ 1747 ssize_t 1748 dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, 1749 const struct iomap_ops *ops) 1750 { 1751 struct iomap_iter iomi = { 1752 .inode = iocb->ki_filp->f_mapping->host, 1753 .pos = iocb->ki_pos, 1754 .len = iov_iter_count(iter), 1755 .flags = IOMAP_DAX, 1756 }; 1757 loff_t done = 0; 1758 int ret; 1759 1760 if (!iomi.len) 1761 return 0; 1762 1763 if (iov_iter_rw(iter) == WRITE) { 1764 lockdep_assert_held_write(&iomi.inode->i_rwsem); 1765 iomi.flags |= IOMAP_WRITE; 1766 } else { 1767 lockdep_assert_held(&iomi.inode->i_rwsem); 1768 } 1769 1770 if (iocb->ki_flags & IOCB_NOWAIT) 1771 iomi.flags |= IOMAP_NOWAIT; 1772 1773 while ((ret = iomap_iter(&iomi, ops)) > 0) 1774 iomi.status = dax_iomap_iter(&iomi, iter); 1775 1776 done = iomi.pos - iocb->ki_pos; 1777 iocb->ki_pos = iomi.pos; 1778 return done ? done : ret; 1779 } 1780 EXPORT_SYMBOL_GPL(dax_iomap_rw); 1781 1782 static vm_fault_t dax_fault_return(int error) 1783 { 1784 if (error == 0) 1785 return VM_FAULT_NOPAGE; 1786 return vmf_error(error); 1787 } 1788 1789 /* 1790 * When handling a synchronous page fault and the inode need a fsync, we can 1791 * insert the PTE/PMD into page tables only after that fsync happened. 
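 * (That case is detected by dax_fault_is_synchronous(): a write fault on a
 * VM_SYNC vma whose iomap carries IOMAP_F_DIRTY.)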
Skip 1792 * insertion for now and return the pfn so that caller can insert it after the 1793 * fsync is done. 1794 */ 1795 static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn) 1796 { 1797 if (WARN_ON_ONCE(!pfnp)) 1798 return VM_FAULT_SIGBUS; 1799 *pfnp = pfn; 1800 return VM_FAULT_NEEDDSYNC; 1801 } 1802 1803 static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf, 1804 const struct iomap_iter *iter) 1805 { 1806 vm_fault_t ret; 1807 int error = 0; 1808 1809 switch (iter->iomap.type) { 1810 case IOMAP_HOLE: 1811 case IOMAP_UNWRITTEN: 1812 clear_user_highpage(vmf->cow_page, vmf->address); 1813 break; 1814 case IOMAP_MAPPED: 1815 error = copy_cow_page_dax(vmf, iter); 1816 break; 1817 default: 1818 WARN_ON_ONCE(1); 1819 error = -EIO; 1820 break; 1821 } 1822 1823 if (error) 1824 return dax_fault_return(error); 1825 1826 __SetPageUptodate(vmf->cow_page); 1827 ret = finish_fault(vmf); 1828 if (!ret) 1829 return VM_FAULT_DONE_COW; 1830 return ret; 1831 } 1832 1833 /** 1834 * dax_fault_iter - Common actor to handle pfn insertion in PTE/PMD fault. 1835 * @vmf: vm fault instance 1836 * @iter: iomap iter 1837 * @pfnp: pfn to be returned 1838 * @xas: the dax mapping tree of a file 1839 * @entry: an unlocked dax entry to be inserted 1840 * @pmd: distinguish whether it is a pmd fault 1841 */ 1842 static vm_fault_t dax_fault_iter(struct vm_fault *vmf, 1843 const struct iomap_iter *iter, pfn_t *pfnp, 1844 struct xa_state *xas, void **entry, bool pmd) 1845 { 1846 const struct iomap *iomap = &iter->iomap; 1847 const struct iomap *srcmap = iomap_iter_srcmap(iter); 1848 size_t size = pmd ? PMD_SIZE : PAGE_SIZE; 1849 loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT; 1850 bool write = iter->flags & IOMAP_WRITE; 1851 unsigned long entry_flags = pmd ? DAX_PMD : 0; 1852 struct folio *folio; 1853 int ret, err = 0; 1854 pfn_t pfn; 1855 void *kaddr; 1856 1857 if (!pmd && vmf->cow_page) 1858 return dax_fault_cow_page(vmf, iter); 1859 1860 /* if we are reading UNWRITTEN and HOLE, return a hole. */ 1861 if (!write && 1862 (iomap->type == IOMAP_UNWRITTEN || iomap->type == IOMAP_HOLE)) { 1863 if (!pmd) 1864 return dax_load_hole(xas, vmf, iter, entry); 1865 return dax_pmd_load_hole(xas, vmf, iter, entry); 1866 } 1867 1868 if (iomap->type != IOMAP_MAPPED && !(iomap->flags & IOMAP_F_SHARED)) { 1869 WARN_ON_ONCE(1); 1870 return pmd ? VM_FAULT_FALLBACK : VM_FAULT_SIGBUS; 1871 } 1872 1873 err = dax_iomap_direct_access(iomap, pos, size, &kaddr, &pfn); 1874 if (err) 1875 return pmd ? 
VM_FAULT_FALLBACK : dax_fault_return(err); 1876 1877 *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, entry_flags); 1878 1879 if (write && iomap->flags & IOMAP_F_SHARED) { 1880 err = dax_iomap_copy_around(pos, size, size, srcmap, kaddr); 1881 if (err) 1882 return dax_fault_return(err); 1883 } 1884 1885 folio = dax_to_folio(*entry); 1886 if (dax_fault_is_synchronous(iter, vmf->vma)) 1887 return dax_fault_synchronous_pfnp(pfnp, pfn); 1888 1889 folio_ref_inc(folio); 1890 if (pmd) 1891 ret = vmf_insert_folio_pmd(vmf, pfn_folio(pfn_t_to_pfn(pfn)), 1892 write); 1893 else 1894 ret = vmf_insert_page_mkwrite(vmf, pfn_t_to_page(pfn), write); 1895 folio_put(folio); 1896 1897 return ret; 1898 } 1899 1900 static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp, 1901 int *iomap_errp, const struct iomap_ops *ops) 1902 { 1903 struct address_space *mapping = vmf->vma->vm_file->f_mapping; 1904 XA_STATE(xas, &mapping->i_pages, vmf->pgoff); 1905 struct iomap_iter iter = { 1906 .inode = mapping->host, 1907 .pos = (loff_t)vmf->pgoff << PAGE_SHIFT, 1908 .len = PAGE_SIZE, 1909 .flags = IOMAP_DAX | IOMAP_FAULT, 1910 }; 1911 vm_fault_t ret = 0; 1912 void *entry; 1913 int error; 1914 1915 trace_dax_pte_fault(iter.inode, vmf, ret); 1916 /* 1917 * Check whether offset isn't beyond end of file now. Caller is supposed 1918 * to hold locks serializing us with truncate / punch hole so this is 1919 * a reliable test. 1920 */ 1921 if (iter.pos >= i_size_read(iter.inode)) { 1922 ret = VM_FAULT_SIGBUS; 1923 goto out; 1924 } 1925 1926 if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page) 1927 iter.flags |= IOMAP_WRITE; 1928 1929 entry = grab_mapping_entry(&xas, mapping, 0); 1930 if (xa_is_internal(entry)) { 1931 ret = xa_to_internal(entry); 1932 goto out; 1933 } 1934 1935 /* 1936 * It is possible, particularly with mixed reads & writes to private 1937 * mappings, that we have raced with a PMD fault that overlaps with 1938 * the PTE we need to set up. If so just return and the fault will be 1939 * retried. 1940 */ 1941 if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) { 1942 ret = VM_FAULT_NOPAGE; 1943 goto unlock_entry; 1944 } 1945 1946 while ((error = iomap_iter(&iter, ops)) > 0) { 1947 if (WARN_ON_ONCE(iomap_length(&iter) < PAGE_SIZE)) { 1948 iter.status = -EIO; /* fs corruption? */ 1949 continue; 1950 } 1951 1952 ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false); 1953 if (ret != VM_FAULT_SIGBUS && 1954 (iter.iomap.flags & IOMAP_F_NEW)) { 1955 count_vm_event(PGMAJFAULT); 1956 count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT); 1957 ret |= VM_FAULT_MAJOR; 1958 } 1959 1960 if (!(ret & VM_FAULT_ERROR)) { 1961 u64 length = PAGE_SIZE; 1962 iter.status = iomap_iter_advance(&iter, &length); 1963 } 1964 } 1965 1966 if (iomap_errp) 1967 *iomap_errp = error; 1968 if (!ret && error) 1969 ret = dax_fault_return(error); 1970 1971 unlock_entry: 1972 dax_unlock_entry(&xas, entry); 1973 out: 1974 trace_dax_pte_fault_done(iter.inode, vmf, ret); 1975 return ret; 1976 } 1977 1978 #ifdef CONFIG_FS_DAX_PMD 1979 static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas, 1980 pgoff_t max_pgoff) 1981 { 1982 unsigned long pmd_addr = vmf->address & PMD_MASK; 1983 bool write = vmf->flags & FAULT_FLAG_WRITE; 1984 1985 /* 1986 * Make sure that the faulting address's PMD offset (color) matches 1987 * the PMD offset from the start of the file. This is necessary so 1988 * that a PMD range in the page table overlaps exactly with a PMD 1989 * range in the page cache. 
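 * For example, with 2MiB PMDs, a fault at a file offset 8KiB past a 2MiB
 * boundary can only use a PMD if the faulting virtual address is also
 * 8KiB past a 2MiB boundary; otherwise we fall back to PTEs.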
1990 */ 1991 if ((vmf->pgoff & PG_PMD_COLOUR) != 1992 ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR)) 1993 return true; 1994 1995 /* Fall back to PTEs if we're going to COW */ 1996 if (write && !(vmf->vma->vm_flags & VM_SHARED)) 1997 return true; 1998 1999 /* If the PMD would extend outside the VMA */ 2000 if (pmd_addr < vmf->vma->vm_start) 2001 return true; 2002 if ((pmd_addr + PMD_SIZE) > vmf->vma->vm_end) 2003 return true; 2004 2005 /* If the PMD would extend beyond the file size */ 2006 if ((xas->xa_index | PG_PMD_COLOUR) >= max_pgoff) 2007 return true; 2008 2009 return false; 2010 } 2011 2012 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp, 2013 const struct iomap_ops *ops) 2014 { 2015 struct address_space *mapping = vmf->vma->vm_file->f_mapping; 2016 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER); 2017 struct iomap_iter iter = { 2018 .inode = mapping->host, 2019 .len = PMD_SIZE, 2020 .flags = IOMAP_DAX | IOMAP_FAULT, 2021 }; 2022 vm_fault_t ret = VM_FAULT_FALLBACK; 2023 pgoff_t max_pgoff; 2024 void *entry; 2025 2026 if (vmf->flags & FAULT_FLAG_WRITE) 2027 iter.flags |= IOMAP_WRITE; 2028 2029 /* 2030 * Check whether offset isn't beyond end of file now. Caller is 2031 * supposed to hold locks serializing us with truncate / punch hole so 2032 * this is a reliable test. 2033 */ 2034 max_pgoff = DIV_ROUND_UP(i_size_read(iter.inode), PAGE_SIZE); 2035 2036 trace_dax_pmd_fault(iter.inode, vmf, max_pgoff, 0); 2037 2038 if (xas.xa_index >= max_pgoff) { 2039 ret = VM_FAULT_SIGBUS; 2040 goto out; 2041 } 2042 2043 if (dax_fault_check_fallback(vmf, &xas, max_pgoff)) 2044 goto fallback; 2045 2046 /* 2047 * grab_mapping_entry() will make sure we get an empty PMD entry, 2048 * a zero PMD entry or a DAX PMD. If it can't (because a PTE 2049 * entry is already in the array, for instance), it will return 2050 * VM_FAULT_FALLBACK. 2051 */ 2052 entry = grab_mapping_entry(&xas, mapping, PMD_ORDER); 2053 if (xa_is_internal(entry)) { 2054 ret = xa_to_internal(entry); 2055 goto fallback; 2056 } 2057 2058 /* 2059 * It is possible, particularly with mixed reads & writes to private 2060 * mappings, that we have raced with a PTE fault that overlaps with 2061 * the PMD we need to set up. If so just return and the fault will be 2062 * retried. 
2063 */ 2064 if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) && 2065 !pmd_devmap(*vmf->pmd)) { 2066 ret = 0; 2067 goto unlock_entry; 2068 } 2069 2070 iter.pos = (loff_t)xas.xa_index << PAGE_SHIFT; 2071 while (iomap_iter(&iter, ops) > 0) { 2072 if (iomap_length(&iter) < PMD_SIZE) 2073 continue; /* actually breaks out of the loop */ 2074 2075 ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true); 2076 if (ret != VM_FAULT_FALLBACK) { 2077 u64 length = PMD_SIZE; 2078 iter.status = iomap_iter_advance(&iter, &length); 2079 } 2080 } 2081 2082 unlock_entry: 2083 dax_unlock_entry(&xas, entry); 2084 fallback: 2085 if (ret == VM_FAULT_FALLBACK) { 2086 split_huge_pmd(vmf->vma, vmf->pmd, vmf->address); 2087 count_vm_event(THP_FAULT_FALLBACK); 2088 } 2089 out: 2090 trace_dax_pmd_fault_done(iter.inode, vmf, max_pgoff, ret); 2091 return ret; 2092 } 2093 #else 2094 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp, 2095 const struct iomap_ops *ops) 2096 { 2097 return VM_FAULT_FALLBACK; 2098 } 2099 #endif /* CONFIG_FS_DAX_PMD */ 2100 2101 /** 2102 * dax_iomap_fault - handle a page fault on a DAX file 2103 * @vmf: The description of the fault 2104 * @order: Order of the page to fault in 2105 * @pfnp: PFN to insert for synchronous faults if fsync is required 2106 * @iomap_errp: Storage for detailed error code in case of error 2107 * @ops: Iomap ops passed from the file system 2108 * 2109 * When a page fault occurs, filesystems may call this helper in 2110 * their fault handler for DAX files. dax_iomap_fault() assumes the caller 2111 * has done all the necessary locking for page fault to proceed 2112 * successfully. 2113 */ 2114 vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order, 2115 pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops) 2116 { 2117 if (order == 0) 2118 return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops); 2119 else if (order == PMD_ORDER) 2120 return dax_iomap_pmd_fault(vmf, pfnp, ops); 2121 else 2122 return VM_FAULT_FALLBACK; 2123 } 2124 EXPORT_SYMBOL_GPL(dax_iomap_fault); 2125 2126 /* 2127 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables 2128 * @vmf: The description of the fault 2129 * @pfn: PFN to insert 2130 * @order: Order of entry to insert. 2131 * 2132 * This function inserts a writeable PTE or PMD entry into the page tables 2133 * for an mmaped DAX file. It also marks the page cache entry as dirty. 2134 */ 2135 static vm_fault_t 2136 dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order) 2137 { 2138 struct address_space *mapping = vmf->vma->vm_file->f_mapping; 2139 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order); 2140 struct folio *folio; 2141 void *entry; 2142 vm_fault_t ret; 2143 2144 xas_lock_irq(&xas); 2145 entry = get_next_unlocked_entry(&xas, order); 2146 /* Did we race with someone splitting entry or so? 
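 * If there is no entry any more, if the entry found is of a smaller
 * order than we asked for, or if we wanted a PTE entry but found a
 * PMD, give up: returning VM_FAULT_NOPAGE simply lets the access
 * refault.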
*/ 2147 if (!entry || dax_is_conflict(entry) || 2148 (order == 0 && !dax_is_pte_entry(entry))) { 2149 put_unlocked_entry(&xas, entry, WAKE_NEXT); 2150 xas_unlock_irq(&xas); 2151 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf, 2152 VM_FAULT_NOPAGE); 2153 return VM_FAULT_NOPAGE; 2154 } 2155 xas_set_mark(&xas, PAGECACHE_TAG_DIRTY); 2156 dax_lock_entry(&xas, entry); 2157 xas_unlock_irq(&xas); 2158 folio = pfn_folio(pfn_t_to_pfn(pfn)); 2159 folio_ref_inc(folio); 2160 if (order == 0) 2161 ret = vmf_insert_page_mkwrite(vmf, &folio->page, true); 2162 #ifdef CONFIG_FS_DAX_PMD 2163 else if (order == PMD_ORDER) 2164 ret = vmf_insert_folio_pmd(vmf, folio, FAULT_FLAG_WRITE); 2165 #endif 2166 else 2167 ret = VM_FAULT_FALLBACK; 2168 folio_put(folio); 2169 dax_unlock_entry(&xas, entry); 2170 trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret); 2171 return ret; 2172 } 2173 2174 /** 2175 * dax_finish_sync_fault - finish synchronous page fault 2176 * @vmf: The description of the fault 2177 * @order: Order of entry to be inserted 2178 * @pfn: PFN to insert 2179 * 2180 * This function ensures that the file range touched by the page fault is 2181 * stored persistently on the media and handles inserting of appropriate page 2182 * table entry. 2183 */ 2184 vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, unsigned int order, 2185 pfn_t pfn) 2186 { 2187 int err; 2188 loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT; 2189 size_t len = PAGE_SIZE << order; 2190 2191 err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1); 2192 if (err) 2193 return VM_FAULT_SIGBUS; 2194 return dax_insert_pfn_mkwrite(vmf, pfn, order); 2195 } 2196 EXPORT_SYMBOL_GPL(dax_finish_sync_fault); 2197 2198 static int dax_range_compare_iter(struct iomap_iter *it_src, 2199 struct iomap_iter *it_dest, u64 len, bool *same) 2200 { 2201 const struct iomap *smap = &it_src->iomap; 2202 const struct iomap *dmap = &it_dest->iomap; 2203 loff_t pos1 = it_src->pos, pos2 = it_dest->pos; 2204 u64 dest_len; 2205 void *saddr, *daddr; 2206 int id, ret; 2207 2208 len = min(len, min(smap->length, dmap->length)); 2209 2210 if (smap->type == IOMAP_HOLE && dmap->type == IOMAP_HOLE) { 2211 *same = true; 2212 goto advance; 2213 } 2214 2215 if (smap->type == IOMAP_HOLE || dmap->type == IOMAP_HOLE) { 2216 *same = false; 2217 return 0; 2218 } 2219 2220 id = dax_read_lock(); 2221 ret = dax_iomap_direct_access(smap, pos1, ALIGN(pos1 + len, PAGE_SIZE), 2222 &saddr, NULL); 2223 if (ret < 0) 2224 goto out_unlock; 2225 2226 ret = dax_iomap_direct_access(dmap, pos2, ALIGN(pos2 + len, PAGE_SIZE), 2227 &daddr, NULL); 2228 if (ret < 0) 2229 goto out_unlock; 2230 2231 *same = !memcmp(saddr, daddr, len); 2232 if (!*same) 2233 len = 0; 2234 dax_read_unlock(id); 2235 2236 advance: 2237 dest_len = len; 2238 ret = iomap_iter_advance(it_src, &len); 2239 if (!ret) 2240 ret = iomap_iter_advance(it_dest, &dest_len); 2241 return ret; 2242 2243 out_unlock: 2244 dax_read_unlock(id); 2245 return -EIO; 2246 } 2247 2248 int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff, 2249 struct inode *dst, loff_t dstoff, loff_t len, bool *same, 2250 const struct iomap_ops *ops) 2251 { 2252 struct iomap_iter src_iter = { 2253 .inode = src, 2254 .pos = srcoff, 2255 .len = len, 2256 .flags = IOMAP_DAX, 2257 }; 2258 struct iomap_iter dst_iter = { 2259 .inode = dst, 2260 .pos = dstoff, 2261 .len = len, 2262 .flags = IOMAP_DAX, 2263 }; 2264 int ret, status; 2265 2266 while ((ret = iomap_iter(&src_iter, ops)) > 0 && 2267 (ret = iomap_iter(&dst_iter, ops)) > 0) { 2268 
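	/*
	 * Both iterations found a mapping: compare one mapping's worth of
	 * data at a time.  dax_range_compare_iter() advances both iterators
	 * past ranges that compare equal; a mismatch or an error ends the
	 * walk.
	 */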
status = dax_range_compare_iter(&src_iter, &dst_iter, 2269 min(src_iter.len, dst_iter.len), same); 2270 if (status < 0) 2271 return ret; 2272 src_iter.status = dst_iter.status = status; 2273 } 2274 return ret; 2275 } 2276 /* DAX counterpart of generic_remap_file_range_prep(): run the generic reflink/dedupe range checks, passing the iomap ops so that any dedupe data comparison is done through DAX (see dax_dedupe_file_range_compare()). */ 2277 int dax_remap_file_range_prep(struct file *file_in, loff_t pos_in, 2278 struct file *file_out, loff_t pos_out, 2279 loff_t *len, unsigned int remap_flags, 2280 const struct iomap_ops *ops) 2281 { 2282 return __generic_remap_file_range_prep(file_in, pos_in, file_out, 2283 pos_out, len, remap_flags, ops); 2284 } 2285 EXPORT_SYMBOL_GPL(dax_remap_file_range_prep); 2286
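/*
 * Example (not part of fs/dax.c): a minimal sketch of how a filesystem
 * ->huge_fault handler might wire up dax_iomap_fault() and
 * dax_finish_sync_fault(), loosely modelled on the ext4/XFS callers.
 * "example_iomap_ops" is a placeholder for the filesystem's own
 * iomap_ops, and the freeze protection and locking against truncate
 * that real callers take are omitted.  Kept under #if 0 so it is never
 * compiled.
 */
#if 0
static vm_fault_t example_dax_huge_fault(struct vm_fault *vmf,
					 unsigned int order)
{
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	pfn_t pfn;
	vm_fault_t ret;

	if (write)
		file_update_time(vmf->vma->vm_file);

	/* Let the DAX core walk the iomap and install the PTE/PMD. */
	ret = dax_iomap_fault(vmf, order, &pfn, NULL, &example_iomap_ops);

	/*
	 * For a write fault on a MAP_SYNC mapping the core hands back the
	 * pfn instead of installing it; flush the range to media and
	 * insert the entry ourselves.
	 */
	if (ret & VM_FAULT_NEEDDSYNC)
		ret = dax_finish_sync_fault(vmf, order, pfn);

	return ret;
}
#endif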