/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

/* The 'colour' (ie low bits) within a PMD of a page offset. */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

/*
 * We use lowest available bit in exceptional entry for locking, one bit for
 * the entry size (PMD) and two more to tell us if the entry is a zero page or
 * an empty entry that is just used for locking.  In total four special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
 */
#define RADIX_DAX_SHIFT		(RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
#define RADIX_DAX_ENTRY_LOCK	(1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
#define RADIX_DAX_PMD		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
#define RADIX_DAX_ZERO_PAGE	(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
#define RADIX_DAX_EMPTY		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))

static unsigned long dax_radix_pfn(void *entry)
{
	return (unsigned long)entry >> RADIX_DAX_SHIFT;
}

static void *dax_radix_locked_entry(unsigned long pfn, unsigned long flags)
{
	return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
			(pfn << RADIX_DAX_SHIFT) | RADIX_DAX_ENTRY_LOCK);
}

static unsigned int dax_radix_order(void *entry)
{
	if ((unsigned long)entry & RADIX_DAX_PMD)
		return PMD_SHIFT - PAGE_SHIFT;
	return 0;
}

static int dax_is_pmd_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_PMD;
}

static int dax_is_pte_entry(void *entry)
{
	return !((unsigned long)entry & RADIX_DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_EMPTY;
}

/*
 * DAX radix tree locking
 */
struct exceptional_entry_key {
	struct address_space *mapping;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};

static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
		pgoff_t index, void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~PG_PMD_COLOUR;

	key->mapping = mapping;
	key->entry_start = index;

	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}

static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mode,
				       int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->mapping != ewait->key.mapping ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * @entry may no longer be the entry at the index in the mapping.
 * The important information it's conveying is whether the entry at
 * this index used to be a PMD entry.
 */
static void dax_wake_mapping_entry_waiter(struct address_space *mapping,
		pgoff_t index, void *entry, bool wake_all)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(mapping, index, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under the i_pages lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}

/*
 * Check whether the given slot is locked.  Must be called with the i_pages
 * lock held.
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
	return entry & RADIX_DAX_ENTRY_LOCK;
}

/*
 * Mark the given slot as locked.  Must be called with the i_pages lock held.
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);

	entry |= RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Mark the given slot as unlocked.  Must be called with the i_pages lock held.
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);

	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Lookup entry in radix tree, wait for it to become unlocked if it is
 * exceptional entry and return it.  The caller must call
 * put_unlocked_mapping_entry() when he decided not to lock the entry or
 * put_locked_mapping_entry() when he locked the entry and now wants to
 * unlock it.
 *
 * Must be called with the i_pages lock held.
 */
static void *__get_unlocked_mapping_entry(struct address_space *mapping,
		pgoff_t index, void ***slotp, bool (*wait_fn)(void))
{
	void *entry, **slot;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		bool revalidate;

		entry = __radix_tree_lookup(&mapping->i_pages, index, NULL,
					    &slot);
		if (!entry ||
		    WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)) ||
		    !slot_locked(mapping, slot)) {
			if (slotp)
				*slotp = slot;
			return entry;
		}

		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		xa_unlock_irq(&mapping->i_pages);
		revalidate = wait_fn();
		finish_wait(wq, &ewait.wait);
		xa_lock_irq(&mapping->i_pages);
		if (revalidate)
			return ERR_PTR(-EAGAIN);
	}
}

static bool entry_wait(void)
{
	schedule();
	/*
	 * Never return an ERR_PTR() from
	 * __get_unlocked_mapping_entry(), just keep looping.
	 */
	return false;
}

static void *get_unlocked_mapping_entry(struct address_space *mapping,
		pgoff_t index, void ***slotp)
{
	return __get_unlocked_mapping_entry(mapping, index, slotp, entry_wait);
}

static void unlock_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	void *entry, **slot;

	xa_lock_irq(&mapping->i_pages);
	entry = __radix_tree_lookup(&mapping->i_pages, index, NULL, &slot);
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
			 !slot_locked(mapping, slot))) {
		xa_unlock_irq(&mapping->i_pages);
		return;
	}
	unlock_slot(mapping, slot);
	xa_unlock_irq(&mapping->i_pages);
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

static void put_locked_mapping_entry(struct address_space *mapping,
		pgoff_t index)
{
	unlock_mapping_entry(mapping, index);
}

/*
 * Called when we are done with radix tree entry we looked up via
 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 */
static void put_unlocked_mapping_entry(struct address_space *mapping,
				       pgoff_t index, void *entry)
{
	if (!entry)
		return;

	/* We have to wake up next waiter for the radix tree entry lock */
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

static unsigned long dax_entry_size(void *entry)
{
	if (dax_is_zero_entry(entry))
		return 0;
	else if (dax_is_empty_entry(entry))
		return 0;
	else if (dax_is_pmd_entry(entry))
		return PMD_SIZE;
	else
		return PAGE_SIZE;
}

static unsigned long dax_radix_end_pfn(void *entry)
{
	return dax_radix_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
}

/*
 * Iterate through all mapped pfns represented by an entry, i.e. skip
 * 'empty' and 'zero' entries.
 */
#define for_each_mapped_pfn(entry, pfn) \
	for (pfn = dax_radix_pfn(entry); \
			pfn < dax_radix_end_pfn(entry); pfn++)

/*
 * TODO: for reflink+dax we need a way to associate a single page with
 * multiple address_space instances at different linear_page_index()
 * offsets.
 */
static void dax_associate_entry(void *entry, struct address_space *mapping,
		struct vm_area_struct *vma, unsigned long address)
{
	unsigned long size = dax_entry_size(entry), pfn, index;
	int i = 0;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	index = linear_page_index(vma, address & ~(size - 1));
	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(page->mapping);
		page->mapping = mapping;
		page->index = index + i++;
	}
}

static void dax_disassociate_entry(void *entry, struct address_space *mapping,
		bool trunc)
{
	unsigned long pfn;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
		WARN_ON_ONCE(page->mapping && page->mapping != mapping);
		page->mapping = NULL;
		page->index = 0;
	}
}

static struct page *dax_busy_page(void *entry)
{
	unsigned long pfn;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		if (page_ref_count(page) > 1)
			return page;
	}
	return NULL;
}

static bool entry_wait_revalidate(void)
{
	rcu_read_unlock();
	schedule();
	rcu_read_lock();

	/*
	 * Tell __get_unlocked_mapping_entry() to take a break, we need
	 * to revalidate page->mapping after dropping locks
	 */
	return true;
}

bool dax_lock_mapping_entry(struct page *page)
{
	pgoff_t index;
	struct inode *inode;
	bool did_lock = false;
	void *entry = NULL, **slot;
	struct address_space *mapping;

	rcu_read_lock();
	for (;;) {
		mapping = READ_ONCE(page->mapping);

		if (!dax_mapping(mapping))
			break;

		/*
		 * In the device-dax case there's no need to lock, a
		 * struct dev_pagemap pin is sufficient to keep the
		 * inode alive, and we assume we have dev_pagemap pin
		 * otherwise we would not have a valid pfn_to_page()
		 * translation.
		 */
		inode = mapping->host;
		if (S_ISCHR(inode->i_mode)) {
			did_lock = true;
			break;
		}

		xa_lock_irq(&mapping->i_pages);
		if (mapping != page->mapping) {
			xa_unlock_irq(&mapping->i_pages);
			continue;
		}
		index = page->index;

		entry = __get_unlocked_mapping_entry(mapping, index, &slot,
				entry_wait_revalidate);
		if (!entry) {
			xa_unlock_irq(&mapping->i_pages);
			break;
		} else if (IS_ERR(entry)) {
			xa_unlock_irq(&mapping->i_pages);
			WARN_ON_ONCE(PTR_ERR(entry) != -EAGAIN);
			continue;
		}
		lock_slot(mapping, slot);
		did_lock = true;
		xa_unlock_irq(&mapping->i_pages);
		break;
	}
	rcu_read_unlock();

	return did_lock;
}

void dax_unlock_mapping_entry(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	if (S_ISCHR(inode->i_mode))
		return;

	unlock_mapping_entry(mapping, page->index);
}

/*
 * Find radix tree entry at given index. If it points to an exceptional entry,
 * return it with the radix tree entry locked. If the radix tree doesn't
 * contain given index, create an empty exceptional entry for the index and
 * return with it locked.
 *
 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return an error.  This error will
 * happen if there are any 4k entries within the 2MiB range that we are
 * requesting.
 *
 * We always favor 4k entries over 2MiB entries. There isn't a flow where we
 * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
 * insertion will fail if it finds any 4k entries already in the tree, and a
 * 4k insertion will cause an existing 2MiB entry to be unmapped and
 * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
 * well as 2MiB empty entries.
 *
 * The exception to this downgrade path is for 2MiB DAX PMD entries that have
 * real storage backing them.  We will leave these real 2MiB DAX entries in
 * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 */
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
		unsigned long size_flag)
{
	bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
	void *entry, **slot;

restart:
	xa_lock_irq(&mapping->i_pages);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);

	if (WARN_ON_ONCE(entry && !radix_tree_exceptional_entry(entry))) {
		entry = ERR_PTR(-EIO);
		goto out_unlock;
	}

	if (entry) {
		if (size_flag & RADIX_DAX_PMD) {
			if (dax_is_pte_entry(entry)) {
				put_unlocked_mapping_entry(mapping, index,
						entry);
				entry = ERR_PTR(-EEXIST);
				goto out_unlock;
			}
		} else { /* trying to grab a PTE entry */
			if (dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	/* No entry for given index? Make sure radix tree is big enough. */
	if (!entry || pmd_downgrade) {
		int err;

		if (pmd_downgrade) {
			/*
			 * Make sure 'entry' remains valid while we drop
			 * the i_pages lock.
			 */
			entry = lock_slot(mapping, slot);
		}

		xa_unlock_irq(&mapping->i_pages);
		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (pmd_downgrade && dax_is_zero_entry(entry))
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
							PG_PMD_NR, false);

		err = radix_tree_preload(
				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
		if (err) {
			if (pmd_downgrade)
				put_locked_mapping_entry(mapping, index);
			return ERR_PTR(err);
		}
		xa_lock_irq(&mapping->i_pages);

		if (!entry) {
			/*
			 * We needed to drop the i_pages lock while calling
			 * radix_tree_preload() and we didn't have an entry to
			 * lock.  See if another thread inserted an entry at
			 * our index during this time.
			 */
			entry = __radix_tree_lookup(&mapping->i_pages, index,
					NULL, &slot);
			if (entry) {
				radix_tree_preload_end();
				xa_unlock_irq(&mapping->i_pages);
				goto restart;
			}
		}

		if (pmd_downgrade) {
			dax_disassociate_entry(entry, mapping, false);
			radix_tree_delete(&mapping->i_pages, index);
			mapping->nrexceptional--;
			dax_wake_mapping_entry_waiter(mapping, index, entry,
					true);
		}

		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);

		err = __radix_tree_insert(&mapping->i_pages, index,
				dax_radix_order(entry), entry);
		radix_tree_preload_end();
		if (err) {
			xa_unlock_irq(&mapping->i_pages);
			/*
			 * Our insertion of a DAX entry failed, most likely
			 * because we were inserting a PMD entry and it
			 * collided with a PTE sized entry at a different
			 * index in the PMD range.  We haven't inserted
			 * anything into the radix tree and have no waiters to
			 * wake.
			 */
			return ERR_PTR(err);
		}
		/* Good, we have inserted empty locked entry into the tree. */
		mapping->nrexceptional++;
		xa_unlock_irq(&mapping->i_pages);
		return entry;
	}
	entry = lock_slot(mapping, slot);
out_unlock:
	xa_unlock_irq(&mapping->i_pages);
	return entry;
}

/**
 * dax_layout_busy_page - find first pinned page in @mapping
 * @mapping: address space to scan for a page with ref count > 1
 *
 * DAX requires ZONE_DEVICE mapped pages. These pages are never
 * 'onlined' to the page allocator so they are considered idle when
 * page->count == 1. A filesystem uses this interface to determine if
 * any page in the mapping is busy, i.e. for DMA, or other
 * get_user_pages() usages.
 *
 * It is expected that the filesystem is holding locks to block the
 * establishment of new mappings in this address_space. I.e. it expects
 * to be able to run unmap_mapping_range() and subsequently not race
 * mapping_mapped() becoming true.
 */
struct page *dax_layout_busy_page(struct address_space *mapping)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct page *page = NULL;
	struct pagevec pvec;
	pgoff_t index, end;
	unsigned i;

	/*
	 * In the 'limited' case get_user_pages() for dax is disabled.
	 */
	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return NULL;

	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
		return NULL;

	pagevec_init(&pvec);
	index = 0;
	end = -1;

	/*
	 * If we race get_user_pages_fast() here either we'll see the
	 * elevated page count in the pagevec_lookup and wait, or
	 * get_user_pages_fast() will see that the page it took a reference
	 * against is no longer mapped in the page tables and bail to the
	 * get_user_pages() slow path.  The slow path is protected by
	 * pte_lock() and pmd_lock(). New references are not taken without
	 * holding those locks, and unmap_mapping_range() will not zero the
	 * pte or pmd without holding the respective lock, so we are
	 * guaranteed to either see new references or prevent new
	 * references from being established.
	 */
	unmap_mapping_range(mapping, 0, 0, 1);

	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE),
				indices)) {
		pgoff_t nr_pages = 1;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *pvec_ent = pvec.pages[i];
			void *entry;

			index = indices[i];
			if (index >= end)
				break;

			if (WARN_ON_ONCE(
			     !radix_tree_exceptional_entry(pvec_ent)))
				continue;

			xa_lock_irq(&mapping->i_pages);
			entry = get_unlocked_mapping_entry(mapping, index, NULL);
			if (entry) {
				page = dax_busy_page(entry);
				/*
				 * Account for multi-order entries at
				 * the end of the pagevec.
				 */
				if (i + 1 >= pagevec_count(&pvec))
					nr_pages = 1UL << dax_radix_order(entry);
			}
			put_unlocked_mapping_entry(mapping, index, entry);
			xa_unlock_irq(&mapping->i_pages);
			if (page)
				break;
		}

		/*
		 * We don't expect normal struct page entries to exist in our
		 * tree, but we keep these pagevec calls so that this code is
		 * consistent with the common pattern for handling pagevecs
		 * throughout the kernel.
		 */
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		index += nr_pages;

		if (page)
			break;
	}
	return page;
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page);

static int __dax_invalidate_mapping_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	int ret = 0;
	void *entry;
	struct radix_tree_root *pages = &mapping->i_pages;

	xa_lock_irq(pages);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	if (!entry || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)))
		goto out;
	if (!trunc &&
	    (radix_tree_tag_get(pages, index, PAGECACHE_TAG_DIRTY) ||
	     radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE)))
		goto out;
	dax_disassociate_entry(entry, mapping, trunc);
	radix_tree_delete(pages, index);
	mapping->nrexceptional--;
	ret = 1;
out:
	put_unlocked_mapping_entry(mapping, index, entry);
	xa_unlock_irq(pages);
	return ret;
}
/*
 * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
 * entry to get unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_mapping_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * radix tree (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen exceptional entry for this index, we better find it
	 * at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate exceptional DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_mapping_entry(mapping, index, false);
}

static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
		sector_t sector, size_t size, struct page *to,
		unsigned long vaddr)
{
	void *vto, *kaddr;
	pgoff_t pgoff;
	long rc;
	int id;

	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, NULL);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_mapping_entry(struct address_space *mapping,
				      struct vm_fault *vmf,
				      void *entry, pfn_t pfn_t,
				      unsigned long flags, bool dirty)
{
	struct radix_tree_root *pages = &mapping->i_pages;
	unsigned long pfn = pfn_t_to_pfn(pfn_t);
	pgoff_t index = vmf->pgoff;
	void *new_entry;

	if (dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) {
		/* we are replacing a zero page with block mapping */
		if (dax_is_pmd_entry(entry))
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
							PG_PMD_NR, false);
		else /* pte entry */
			unmap_mapping_pages(mapping, vmf->pgoff, 1, false);
	}

	xa_lock_irq(pages);
	new_entry = dax_radix_locked_entry(pfn, flags);
	if (dax_entry_size(entry) != dax_entry_size(new_entry)) {
		dax_disassociate_entry(entry, mapping, false);
		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
	}

	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		/*
		 * Only swap our new entry into the radix tree if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the tree, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		struct radix_tree_node *node;
		void **slot;
		void *ret;

		ret = __radix_tree_lookup(pages, index, &node, &slot);
		WARN_ON_ONCE(ret != entry);
		__radix_tree_replace(pages, node, slot,
				     new_entry, NULL);
		entry = new_entry;
	}

	if (dirty)
		radix_tree_tag_set(pages, index, PAGECACHE_TAG_DIRTY);

	xa_unlock_irq(pages);
	return entry;
}

static inline unsigned long
pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}

/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_mapping_entry_mkclean(struct address_space *mapping,
				      pgoff_t index, unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		unsigned long address, start, end;

		cond_resched();

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);

		/*
		 * Note because we provide start/end to follow_pte_pmd it will
		 * call mmu_notifier_invalidate_range_start() on our behalf
		 * before taking any lock.
		 */
		if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
			continue;

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection not changing it to point
		 * to a new page.
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_huge_clear_flush(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
unlock_pmd:
#endif
			spin_unlock(ptl);
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
	}
	i_mmap_unlock_read(mapping);
}

static int dax_writeback_one(struct dax_device *dax_dev,
		struct address_space *mapping, pgoff_t index, void *entry)
{
	struct radix_tree_root *pages = &mapping->i_pages;
	void *entry2, **slot;
	unsigned long pfn;
	long ret = 0;
	size_t size;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!radix_tree_exceptional_entry(entry)))
		return -EIO;

	xa_lock_irq(pages);
	entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
	/* Entry got punched out / reallocated? */
	if (!entry2 || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry2)))
		goto put_unlocked;
	/*
	 * Entry got reallocated elsewhere? No need to writeback. We have to
	 * compare pfns as we must not bail out due to difference in lockbit
	 * or entry type.
	 */
	if (dax_radix_pfn(entry2) != dax_radix_pfn(entry))
		goto put_unlocked;
	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
				dax_is_zero_entry(entry))) {
		ret = -EIO;
		goto put_unlocked;
	}

	/* Another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE))
		goto put_unlocked;
	/* Lock the entry to serialize with page faults */
	entry = lock_slot(mapping, slot);
	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under the i_pages lock and once they do that
	 * they will see the entry locked and wait for it to unlock.
	 */
	radix_tree_tag_clear(pages, index, PAGECACHE_TAG_TOWRITE);
	xa_unlock_irq(pages);

	/*
	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we are given will be aligned to
	 * the start index of the PMD, as will the pfn we pull from 'entry'.
	 * This allows us to flush for PMD_SIZE and not have to worry about
	 * partial PMD writebacks.
	 */
	pfn = dax_radix_pfn(entry);
	size = PAGE_SIZE << dax_radix_order(entry);

	dax_mapping_entry_mkclean(mapping, index, pfn);
	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), size);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	xa_lock_irq(pages);
	radix_tree_tag_clear(pages, index, PAGECACHE_TAG_DIRTY);
	xa_unlock_irq(pages);
	trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT);
	put_locked_mapping_entry(mapping, index);
	return ret;

put_unlocked:
	put_unlocked_mapping_entry(mapping, index, entry2);
	xa_unlock_irq(pages);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	pgoff_t start_index, end_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct dax_device *dax_dev;
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev)
		return -EIO;

	start_index = wbc->range_start >> PAGE_SHIFT;
	end_index = wbc->range_end >> PAGE_SHIFT;

	trace_dax_writeback_range(inode, start_index, end_index);

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(dax_dev, mapping, indices[i],
					pvec.pages[i]);
			if (ret < 0) {
				mapping_set_error(mapping, ret);
				goto out;
			}
		}
		start_index = indices[pvec.nr - 1] + 1;
	}
out:
	put_dax(dax_dev);
	trace_dax_writeback_range_done(inode, start_index, end_index);
	return (ret < 0 ? ret : 0);
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
}

static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
			 pfn_t *pfnp)
{
	const sector_t sector = dax_iomap_sector(iomap, pos);
	pgoff_t pgoff;
	int id, rc;
	long length;

	rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
	if (rc)
		return rc;
	id = dax_read_lock();
	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
				   NULL, pfnp);
	if (length < 0) {
		rc = length;
		goto out;
	}
	rc = -EINVAL;
	if (PFN_PHYS(length) < size)
		goto out;
	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
		goto out;
	/* For larger pages we need devmap */
	if (length > 1 && !pfn_t_devmap(*pfnp))
		goto out;
	rc = 0;
out:
	dax_read_unlock(id);
	return rc;
}

/*
 * The user has performed a load from a hole in the file.  Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
 */
static vm_fault_t dax_load_hole(struct address_space *mapping, void *entry,
			 struct vm_fault *vmf)
{
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
	vm_fault_t ret;

	dax_insert_mapping_entry(mapping, vmf, entry, pfn, RADIX_DAX_ZERO_PAGE,
			false);
	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}

static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}

int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int size)
{
	if (dax_range_is_aligned(bdev, offset, size)) {
		sector_t start_sector = sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				size >> 9, GFP_NOFS, 0);
	} else {
		pgoff_t pgoff;
		long rc, id;
		void *kaddr;

		rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
		if (rc)
			return rc;

		id = dax_read_lock();
		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
		if (rc < 0) {
			dax_read_unlock(id);
			return rc;
		}
		memset(kaddr + offset, 0, size);
		dax_flush(dax_dev, kaddr + offset, size);
		dax_read_unlock(id);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);

static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct block_device *bdev = iomap->bdev;
	struct dax_device *dax_dev = iomap->dax_dev;
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;
	size_t xfer;
	int id;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/*
	 * Write can allocate block for an area which has a hole page mapped
	 * into page tables. We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
	 */
	if (iomap->flags & IOMAP_F_NEW) {
		invalidate_inode_pages2_range(inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	id = dax_read_lock();
	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		const size_t size = ALIGN(length + offset, PAGE_SIZE);
		const sector_t sector = dax_iomap_sector(iomap, pos);
		ssize_t map_len;
		pgoff_t pgoff;
		void *kaddr;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
		if (ret)
			break;

		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
				&kaddr, NULL);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		map_len = PFN_PHYS(map_len);
		kaddr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		/*
		 * The userspace address for the memory copy has already been
		 * validated via access_ok() in either vfs_read() or
		 * vfs_write(), depending on which operation we are doing.
		 */
		if (iov_iter_rw(iter) == WRITE)
			xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
					map_len, iter);
		else
			xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
					map_len, iter);

		pos += xfer;
		length -= xfer;
		done += xfer;

		if (xfer == 0)
			ret = -EFAULT;
		if (xfer < map_len)
			break;
	}
	dax_read_unlock(id);

	return done ? done : ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:	The control block for this I/O
 * @iter:	The addresses to do I/O from or to
 * @ops:	iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_exclusive(&inode->i_rwsem);
		flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&inode->i_rwsem);
	}

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);

static vm_fault_t dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}

/*
 * MAP_SYNC on a dax mapping guarantees dirty metadata is
 * flushed on write-faults (non-cow), but not read-faults.
 */
static bool dax_fault_is_synchronous(unsigned long flags,
		struct vm_area_struct *vma, struct iomap *iomap)
{
	return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
		&& (iomap->flags & IOMAP_F_DIRTY);
}

static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       int *iomap_errp, const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	struct iomap iomap = { 0 };
	unsigned flags = IOMAP_FAULT;
	int error, major = 0;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	vm_fault_t ret = 0;
	void *entry;
	pfn_t pfn;

	trace_dax_pte_fault(inode, vmf, ret);
	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	if (pos >= i_size_read(inode)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if (write && !vmf->cow_page)
		flags |= IOMAP_WRITE;

	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
	if (IS_ERR(entry)) {
		ret = dax_fault_return(PTR_ERR(entry));
		goto out;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PMD fault that overlaps with
	 * the PTE we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
		ret = VM_FAULT_NOPAGE;
		goto unlock_entry;
	}

	/*
	 * Note that we don't bother to use iomap_apply here: DAX requires
	 * the file system block size to be equal to the page size, which means
	 * that we never have to deal with more than a single extent here.
	 */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
	if (iomap_errp)
		*iomap_errp = error;
	if (error) {
		ret = dax_fault_return(error);
		goto unlock_entry;
	}
	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
		error = -EIO;	/* fs corruption? */
		goto error_finish_iomap;
	}

	if (vmf->cow_page) {
		sector_t sector = dax_iomap_sector(&iomap, pos);

		switch (iomap.type) {
		case IOMAP_HOLE:
		case IOMAP_UNWRITTEN:
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
			error = copy_user_dax(iomap.bdev, iomap.dax_dev,
					sector, PAGE_SIZE, vmf->cow_page, vaddr);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		}

		if (error)
			goto error_finish_iomap;

		__SetPageUptodate(vmf->cow_page);
		ret = finish_fault(vmf);
		if (!ret)
			ret = VM_FAULT_DONE_COW;
		goto finish_iomap;
	}

	sync = dax_fault_is_synchronous(flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomap.flags & IOMAP_F_NEW) {
			count_vm_event(PGMAJFAULT);
			count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
		}
		error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
		if (error < 0)
			goto error_finish_iomap;

		entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
						 0, write && !sync);

		/*
		 * If we are doing synchronous page fault and inode needs fsync,
		 * we can insert PTE into page tables only after that happens.
		 * Skip insertion for now and return the pfn so that caller can
		 * insert it after fsync is done.
		 */
		if (sync) {
			if (WARN_ON_ONCE(!pfnp)) {
				error = -EIO;
				goto error_finish_iomap;
			}
			*pfnp = pfn;
			ret = VM_FAULT_NEEDDSYNC | major;
			goto finish_iomap;
		}
		trace_dax_insert_mapping(inode, vmf, entry);
		if (write)
			ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
		else
			ret = vmf_insert_mixed(vma, vaddr, pfn);

		goto finish_iomap;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (!write) {
			ret = dax_load_hole(mapping, entry, vmf);
			goto finish_iomap;
		}
		/*FALLTHRU*/
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

error_finish_iomap:
	ret = dax_fault_return(error);
finish_iomap:
	if (ops->iomap_end) {
		int copied = PAGE_SIZE;

		if (ret & VM_FAULT_ERROR)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PTE we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
	}
unlock_entry:
	put_locked_mapping_entry(mapping, vmf->pgoff);
out:
	trace_dax_pte_fault_done(inode, vmf, ret);
	return ret | major;
}

#ifdef CONFIG_FS_DAX_PMD
static vm_fault_t dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
		void *entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct inode *inode = mapping->host;
	struct page *zero_page;
	void *ret = NULL;
	spinlock_t *ptl;
	pmd_t pmd_entry;
	pfn_t pfn;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	pfn = page_to_pfn_t(zero_page);
	ret = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
			RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE, false);

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		spin_unlock(ptl);
		goto fallback;
	}

	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
	return VM_FAULT_NOPAGE;

fallback:
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
	return VM_FAULT_FALLBACK;
}

static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
	struct inode *inode = mapping->host;
	vm_fault_t result = VM_FAULT_FALLBACK;
	struct iomap iomap = { 0 };
	pgoff_t max_pgoff, pgoff;
	void *entry;
	loff_t pos;
	int error;
	pfn_t pfn;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is
	 * supposed to hold locks serializing us with truncate / punch hole so
	 * this is a reliable test.
	 */
	pgoff = linear_page_index(vma, pmd_addr);
	max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);

	/*
	 * Make sure that the faulting address's PMD offset (color) matches
	 * the PMD offset from the start of the file.  This is necessary so
	 * that a PMD range in the page table overlaps exactly with a PMD
	 * range in the radix tree.
	 */
	if ((vmf->pgoff & PG_PMD_COLOUR) !=
	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
		goto fallback;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		goto fallback;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		goto fallback;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		goto fallback;

	if (pgoff >= max_pgoff) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}

	/* If the PMD would extend beyond the file size */
	if ((pgoff | PG_PMD_COLOUR) >= max_pgoff)
		goto fallback;

	/*
	 * grab_mapping_entry() will make sure we get a 2MiB empty entry, a
	 * 2MiB zero page entry or a DAX PMD.  If it can't (because a 4k page
	 * is already in the tree, for instance), it will return -EEXIST and
	 * we just fall back to 4k entries.
	 */
	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
	if (IS_ERR(entry))
		goto fallback;

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PTE fault that overlaps with
	 * the PMD we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
			!pmd_devmap(*vmf->pmd)) {
		result = 0;
		goto unlock_entry;
	}

	/*
	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
	 * setting up a mapping, so really we're using iomap_begin() as a way
	 * to look up our filesystem block.
	 */
	pos = (loff_t)pgoff << PAGE_SHIFT;
	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
	if (error)
		goto unlock_entry;

	if (iomap.offset + iomap.length < pos + PMD_SIZE)
		goto finish_iomap;

	sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
		if (error < 0)
			goto finish_iomap;

		entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
						RADIX_DAX_PMD, write && !sync);

		/*
		 * If we are doing synchronous page fault and inode needs fsync,
		 * we can insert PMD into page tables only after that happens.
		 * Skip insertion for now and return the pfn so that caller can
		 * insert it after fsync is done.
		 */
		if (sync) {
			if (WARN_ON_ONCE(!pfnp))
				goto finish_iomap;
			*pfnp = pfn;
			result = VM_FAULT_NEEDDSYNC;
			goto finish_iomap;
		}

		trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
		result = vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn,
					    write);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(write))
			break;
		result = dax_pmd_load_hole(vmf, &iomap, entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

finish_iomap:
	if (ops->iomap_end) {
		int copied = PMD_SIZE;

		if (result == VM_FAULT_FALLBACK)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PMD we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
				&iomap);
	}
unlock_entry:
	put_locked_mapping_entry(mapping, pgoff);
fallback:
	if (result == VM_FAULT_FALLBACK) {
		split_huge_pmd(vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
	return result;
}
#else
static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */

/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vmf: The description of the fault
 * @pe_size: Size of the page to fault in
 * @pfnp: PFN to insert for synchronous faults if fsync is required
 * @iomap_errp: Storage for detailed error code in case of error
 * @ops: Iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in
 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
 * has done all the necessary locking for page fault to proceed
 * successfully.
 */
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
{
	switch (pe_size) {
	case PE_SIZE_PTE:
		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
	case PE_SIZE_PMD:
		return dax_iomap_pmd_fault(vmf, pfnp, ops);
	default:
		return VM_FAULT_FALLBACK;
	}
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);

/**
 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
 * @vmf: The description of the fault
 * @pe_size: Size of entry to be inserted
 * @pfn: PFN to insert
 *
 * This function inserts writeable PTE or PMD entry into page tables for mmaped
 * DAX file.  It takes care of marking corresponding radix tree entry as dirty
 * as well.
 */
static vm_fault_t dax_insert_pfn_mkwrite(struct vm_fault *vmf,
				  enum page_entry_size pe_size,
				  pfn_t pfn)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	void *entry, **slot;
	pgoff_t index = vmf->pgoff;
	vm_fault_t ret;

	xa_lock_irq(&mapping->i_pages);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);
	/* Did we race with someone splitting entry or so? */
	if (!entry ||
	    (pe_size == PE_SIZE_PTE && !dax_is_pte_entry(entry)) ||
	    (pe_size == PE_SIZE_PMD && !dax_is_pmd_entry(entry))) {
		put_unlocked_mapping_entry(mapping, index, entry);
		xa_unlock_irq(&mapping->i_pages);
		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
						      VM_FAULT_NOPAGE);
		return VM_FAULT_NOPAGE;
	}
	radix_tree_tag_set(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY);
	entry = lock_slot(mapping, slot);
	xa_unlock_irq(&mapping->i_pages);
	switch (pe_size) {
	case PE_SIZE_PTE:
		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
		break;
#ifdef CONFIG_FS_DAX_PMD
	case PE_SIZE_PMD:
		ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
			pfn, true);
		break;
#endif
	default:
		ret = VM_FAULT_FALLBACK;
	}
	put_locked_mapping_entry(mapping, index);
	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
	return ret;
}

/**
 * dax_finish_sync_fault - finish synchronous page fault
 * @vmf: The description of the fault
 * @pe_size: Size of entry to be inserted
 * @pfn: PFN to insert
 *
 * This function ensures that the file range touched by the page fault is
 * stored persistently on the media and handles inserting of appropriate page
 * table entry.
 */
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size, pfn_t pfn)
{
	int err;
	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
	size_t len = 0;

	if (pe_size == PE_SIZE_PTE)
		len = PAGE_SIZE;
	else if (pe_size == PE_SIZE_PMD)
		len = PMD_SIZE;
	else
		WARN_ON_ONCE(1);
	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
	if (err)
		return VM_FAULT_SIGBUS;
	return dax_insert_pfn_mkwrite(vmf, pe_size, pfn);
}
EXPORT_SYMBOL_GPL(dax_finish_sync_fault);