xref: /linux/fs/dax.c (revision 0e685c3e7158d35626d6d76b9f859eae806d87fa)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * fs/dax.c - Direct Access filesystem code
4  * Copyright (c) 2013-2014 Intel Corporation
5  * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
6  * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
7  */
8 
9 #include <linux/atomic.h>
10 #include <linux/blkdev.h>
11 #include <linux/buffer_head.h>
12 #include <linux/dax.h>
13 #include <linux/fs.h>
14 #include <linux/highmem.h>
15 #include <linux/memcontrol.h>
16 #include <linux/mm.h>
17 #include <linux/mutex.h>
18 #include <linux/pagevec.h>
19 #include <linux/sched.h>
20 #include <linux/sched/signal.h>
21 #include <linux/uio.h>
22 #include <linux/vmstat.h>
23 #include <linux/pfn_t.h>
24 #include <linux/sizes.h>
25 #include <linux/mmu_notifier.h>
26 #include <linux/iomap.h>
27 #include <linux/rmap.h>
28 #include <asm/pgalloc.h>
29 
30 #define CREATE_TRACE_POINTS
31 #include <trace/events/fs_dax.h>
32 
33 static inline unsigned int pe_order(enum page_entry_size pe_size)
34 {
35 	if (pe_size == PE_SIZE_PTE)
36 		return PAGE_SHIFT - PAGE_SHIFT;
37 	if (pe_size == PE_SIZE_PMD)
38 		return PMD_SHIFT - PAGE_SHIFT;
39 	if (pe_size == PE_SIZE_PUD)
40 		return PUD_SHIFT - PAGE_SHIFT;
41 	return ~0;
42 }
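
/*
 * Worked example (illustrative, assuming x86-64 with 4K pages):
 * PE_SIZE_PTE yields order 0, PE_SIZE_PMD yields 21 - 12 = 9 (a 2M
 * mapping spans 512 base pages) and PE_SIZE_PUD yields 30 - 12 = 18.
 * Any other value yields ~0, an invalid order.
 */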
43 
44 /* We choose 4096 entries - same as per-zone page wait tables */
45 #define DAX_WAIT_TABLE_BITS 12
46 #define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
47 
48 /* The 'colour' (i.e. the low bits) of a page offset within a PMD.  */
49 #define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
50 #define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)
51 
52 /* The order of a PMD entry */
53 #define PMD_ORDER	(PMD_SHIFT - PAGE_SHIFT)
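
/*
 * Example (illustrative, assuming 4K pages and 2M PMDs): PG_PMD_COLOUR
 * is 511, PG_PMD_NR is 512 and PMD_ORDER is 9, so "index &
 * ~PG_PMD_COLOUR" rounds a page offset down to the first page of its
 * PMD.
 */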
54 
55 static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
56 
57 static int __init init_dax_wait_table(void)
58 {
59 	int i;
60 
61 	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
62 		init_waitqueue_head(wait_table + i);
63 	return 0;
64 }
65 fs_initcall(init_dax_wait_table);
66 
67 /*
68  * DAX pagecache entries use XArray value entries so they can't be mistaken
69  * for pages.  We use one bit for locking, one bit for the entry size (PMD)
70  * and two more to tell us if the entry is a zero page or an empty entry that
71  * is just used for locking.  In total four special bits.
72  *
73  * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
74  * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
75  * block allocation.
76  */
77 #define DAX_SHIFT	(4)
78 #define DAX_LOCKED	(1UL << 0)
79 #define DAX_PMD		(1UL << 1)
80 #define DAX_ZERO_PAGE	(1UL << 2)
81 #define DAX_EMPTY	(1UL << 3)
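
/*
 * Encoding example (illustrative): for pfn 0x1234, a locked PMD entry
 * is the XArray value (0x1234 << DAX_SHIFT) | DAX_PMD | DAX_LOCKED ==
 * 0x12343.  dax_to_pfn() below simply shifts the flag bits back out.
 */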
82 
83 static unsigned long dax_to_pfn(void *entry)
84 {
85 	return xa_to_value(entry) >> DAX_SHIFT;
86 }
87 
88 static void *dax_make_entry(pfn_t pfn, unsigned long flags)
89 {
90 	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
91 }
92 
93 static bool dax_is_locked(void *entry)
94 {
95 	return xa_to_value(entry) & DAX_LOCKED;
96 }
97 
98 static unsigned int dax_entry_order(void *entry)
99 {
100 	if (xa_to_value(entry) & DAX_PMD)
101 		return PMD_ORDER;
102 	return 0;
103 }
104 
105 static unsigned long dax_is_pmd_entry(void *entry)
106 {
107 	return xa_to_value(entry) & DAX_PMD;
108 }
109 
110 static bool dax_is_pte_entry(void *entry)
111 {
112 	return !(xa_to_value(entry) & DAX_PMD);
113 }
114 
115 static int dax_is_zero_entry(void *entry)
116 {
117 	return xa_to_value(entry) & DAX_ZERO_PAGE;
118 }
119 
120 static int dax_is_empty_entry(void *entry)
121 {
122 	return xa_to_value(entry) & DAX_EMPTY;
123 }
124 
125 /*
126  * true if the entry that was found is of a smaller order than the entry
127  * we were looking for
128  */
129 static bool dax_is_conflict(void *entry)
130 {
131 	return entry == XA_RETRY_ENTRY;
132 }
133 
134 /*
135  * DAX page cache entry locking
136  */
137 struct exceptional_entry_key {
138 	struct xarray *xa;
139 	pgoff_t entry_start;
140 };
141 
142 struct wait_exceptional_entry_queue {
143 	wait_queue_entry_t wait;
144 	struct exceptional_entry_key key;
145 };
146 
147 /**
148  * enum dax_wake_mode: waitqueue wakeup behaviour
149  * @WAKE_ALL: wake all waiters in the waitqueue
150  * @WAKE_NEXT: wake only the first waiter in the waitqueue
151  */
152 enum dax_wake_mode {
153 	WAKE_ALL,
154 	WAKE_NEXT,
155 };
156 
157 static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
158 		void *entry, struct exceptional_entry_key *key)
159 {
160 	unsigned long hash;
161 	unsigned long index = xas->xa_index;
162 
163 	/*
164 	 * If 'entry' is a PMD, align the 'index' that we use for the wait
165 	 * queue to the start of that PMD.  This ensures that all offsets in
166 	 * the range covered by the PMD map to the same bit lock.
167 	 */
168 	if (dax_is_pmd_entry(entry))
169 		index &= ~PG_PMD_COLOUR;
170 	key->xa = xas->xa;
171 	key->entry_start = index;
172 
173 	hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
174 	return wait_table + hash;
175 }
176 
177 static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
178 		unsigned int mode, int sync, void *keyp)
179 {
180 	struct exceptional_entry_key *key = keyp;
181 	struct wait_exceptional_entry_queue *ewait =
182 		container_of(wait, struct wait_exceptional_entry_queue, wait);
183 
184 	if (key->xa != ewait->key.xa ||
185 	    key->entry_start != ewait->key.entry_start)
186 		return 0;
187 	return autoremove_wake_function(wait, mode, sync, NULL);
188 }
189 
190 /*
191  * @entry may no longer be the entry at the index in the mapping.
192  * The important information it's conveying is whether the entry at
193  * this index used to be a PMD entry.
194  */
195 static void dax_wake_entry(struct xa_state *xas, void *entry,
196 			   enum dax_wake_mode mode)
197 {
198 	struct exceptional_entry_key key;
199 	wait_queue_head_t *wq;
200 
201 	wq = dax_entry_waitqueue(xas, entry, &key);
202 
203 	/*
204 	 * Checking for locked entry and prepare_to_wait_exclusive() happens
205 	 * under the i_pages lock, ditto for entry handling in our callers.
206 	 * So at this point all tasks that could have seen our entry locked
207 	 * must be in the waitqueue and the following check will see them.
208 	 */
209 	if (waitqueue_active(wq))
210 		__wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key);
211 }
212 
213 /*
214  * Look up entry in page cache, wait for it to become unlocked if it
215  * is a DAX entry and return it.  The caller must subsequently call
216  * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
217  * if it did.  The entry returned may have a larger order than @order.
218  * If @order is larger than the order of the entry found in i_pages, this
219  * function returns a dax_is_conflict entry.
220  *
221  * Must be called with the i_pages lock held.
222  */
223 static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
224 {
225 	void *entry;
226 	struct wait_exceptional_entry_queue ewait;
227 	wait_queue_head_t *wq;
228 
229 	init_wait(&ewait.wait);
230 	ewait.wait.func = wake_exceptional_entry_func;
231 
232 	for (;;) {
233 		entry = xas_find_conflict(xas);
234 		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
235 			return entry;
236 		if (dax_entry_order(entry) < order)
237 			return XA_RETRY_ENTRY;
238 		if (!dax_is_locked(entry))
239 			return entry;
240 
241 		wq = dax_entry_waitqueue(xas, entry, &ewait.key);
242 		prepare_to_wait_exclusive(wq, &ewait.wait,
243 					  TASK_UNINTERRUPTIBLE);
244 		xas_unlock_irq(xas);
245 		xas_reset(xas);
246 		schedule();
247 		finish_wait(wq, &ewait.wait);
248 		xas_lock_irq(xas);
249 	}
250 }
251 
252 /*
253  * The only thing keeping the address space around is the i_pages lock
254  * (it's cycled in clear_inode() after removing the entries from i_pages).
255  * After we call xas_unlock_irq(), we cannot touch xas->xa.
256  */
257 static void wait_entry_unlocked(struct xa_state *xas, void *entry)
258 {
259 	struct wait_exceptional_entry_queue ewait;
260 	wait_queue_head_t *wq;
261 
262 	init_wait(&ewait.wait);
263 	ewait.wait.func = wake_exceptional_entry_func;
264 
265 	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
266 	/*
267 	 * Unlike get_unlocked_entry() there is no guarantee that this
268 	 * path ever successfully retrieves an unlocked entry before an
269 	 * inode dies. Perform a non-exclusive wait in case this path
270 	 * never successfully performs its own wake up.
271 	 */
272 	prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
273 	xas_unlock_irq(xas);
274 	schedule();
275 	finish_wait(wq, &ewait.wait);
276 }
277 
278 static void put_unlocked_entry(struct xa_state *xas, void *entry,
279 			       enum dax_wake_mode mode)
280 {
281 	if (entry && !dax_is_conflict(entry))
282 		dax_wake_entry(xas, entry, mode);
283 }
284 
285 /*
286  * We used the xa_state to get the entry, but then we locked the entry and
287  * dropped the xa_lock, so we know the xa_state is stale and must be reset
288  * before use.
289  */
290 static void dax_unlock_entry(struct xa_state *xas, void *entry)
291 {
292 	void *old;
293 
294 	BUG_ON(dax_is_locked(entry));
295 	xas_reset(xas);
296 	xas_lock_irq(xas);
297 	old = xas_store(xas, entry);
298 	xas_unlock_irq(xas);
299 	BUG_ON(!dax_is_locked(old));
300 	dax_wake_entry(xas, entry, WAKE_NEXT);
301 }
302 
303 /*
304  * Return: The entry stored at this location before it was locked.
305  */
306 static void *dax_lock_entry(struct xa_state *xas, void *entry)
307 {
308 	unsigned long v = xa_to_value(entry);
309 	return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
310 }
311 
312 static unsigned long dax_entry_size(void *entry)
313 {
314 	if (dax_is_zero_entry(entry))
315 		return 0;
316 	else if (dax_is_empty_entry(entry))
317 		return 0;
318 	else if (dax_is_pmd_entry(entry))
319 		return PMD_SIZE;
320 	else
321 		return PAGE_SIZE;
322 }
323 
324 static unsigned long dax_end_pfn(void *entry)
325 {
326 	return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
327 }
328 
329 /*
330  * Iterate through all mapped pfns represented by an entry, i.e. skip
331  * 'empty' and 'zero' entries.
332  */
333 #define for_each_mapped_pfn(entry, pfn) \
334 	for (pfn = dax_to_pfn(entry); \
335 			pfn < dax_end_pfn(entry); pfn++)
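
/*
 * Example (illustrative, assuming 4K pages and 2M PMDs): for a PMD
 * entry at pfn 0x1000, the loop visits pfns 0x1000..0x11ff.  For zero
 * and empty entries dax_entry_size() is 0, so the loop body never runs.
 */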
336 
337 static inline bool dax_page_is_shared(struct page *page)
338 {
339 	return page->mapping == PAGE_MAPPING_DAX_SHARED;
340 }
341 
342 /*
343  * Set page->mapping to PAGE_MAPPING_DAX_SHARED and increase the
344  * refcount.
345  */
346 static inline void dax_page_share_get(struct page *page)
347 {
348 	if (page->mapping != PAGE_MAPPING_DAX_SHARED) {
349 		/*
350 		 * Reset the share count (which aliases page->index) if
351 		 * the page was already mapped regularly before.
352 		 */
353 		if (page->mapping)
354 			page->share = 1;
355 		page->mapping = PAGE_MAPPING_DAX_SHARED;
356 	}
357 	page->share++;
358 }
359 
360 static inline unsigned long dax_page_share_put(struct page *page)
361 {
362 	return --page->share;
363 }
364 
365 /*
366  * When called from dax_insert_entry(), the shared flag indicates whether
367  * this entry is shared by multiple files.  If so, set page->mapping to
368  * PAGE_MAPPING_DAX_SHARED and use page->share as the refcount.
369  */
370 static void dax_associate_entry(void *entry, struct address_space *mapping,
371 		struct vm_area_struct *vma, unsigned long address, bool shared)
372 {
373 	unsigned long size = dax_entry_size(entry), pfn, index;
374 	int i = 0;
375 
376 	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
377 		return;
378 
379 	index = linear_page_index(vma, address & ~(size - 1));
380 	for_each_mapped_pfn(entry, pfn) {
381 		struct page *page = pfn_to_page(pfn);
382 
383 		if (shared) {
384 			dax_page_share_get(page);
385 		} else {
386 			WARN_ON_ONCE(page->mapping);
387 			page->mapping = mapping;
388 			page->index = index + i++;
389 		}
390 	}
391 }
392 
393 static void dax_disassociate_entry(void *entry, struct address_space *mapping,
394 		bool trunc)
395 {
396 	unsigned long pfn;
397 
398 	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
399 		return;
400 
401 	for_each_mapped_pfn(entry, pfn) {
402 		struct page *page = pfn_to_page(pfn);
403 
404 		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
405 		if (dax_page_is_shared(page)) {
406 			/* keep the shared flag if this page is still shared */
407 			if (dax_page_share_put(page) > 0)
408 				continue;
409 		} else
410 			WARN_ON_ONCE(page->mapping && page->mapping != mapping);
411 		page->mapping = NULL;
412 		page->index = 0;
413 	}
414 }
415 
416 static struct page *dax_busy_page(void *entry)
417 {
418 	unsigned long pfn;
419 
420 	for_each_mapped_pfn(entry, pfn) {
421 		struct page *page = pfn_to_page(pfn);
422 
423 		if (page_ref_count(page) > 1)
424 			return page;
425 	}
426 	return NULL;
427 }
428 
429 /*
430  * dax_lock_page - Lock the DAX entry corresponding to a page
431  * @page: The page whose entry we want to lock
432  *
433  * Context: Process context.
434  * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
435  * not be locked.
436  */
437 dax_entry_t dax_lock_page(struct page *page)
438 {
439 	XA_STATE(xas, NULL, 0);
440 	void *entry;
441 
442 	/* Ensure page->mapping isn't freed while we look at it */
443 	rcu_read_lock();
444 	for (;;) {
445 		struct address_space *mapping = READ_ONCE(page->mapping);
446 
447 		entry = NULL;
448 		if (!mapping || !dax_mapping(mapping))
449 			break;
450 
451 		/*
452 		 * In the device-dax case there's no need to lock, a
453 		 * struct dev_pagemap pin is sufficient to keep the
454 		 * inode alive, and we assume we have a dev_pagemap pin;
455 		 * otherwise we would not have a valid pfn_to_page()
456 		 * translation.
457 		 */
458 		entry = (void *)~0UL;
459 		if (S_ISCHR(mapping->host->i_mode))
460 			break;
461 
462 		xas.xa = &mapping->i_pages;
463 		xas_lock_irq(&xas);
464 		if (mapping != page->mapping) {
465 			xas_unlock_irq(&xas);
466 			continue;
467 		}
468 		xas_set(&xas, page->index);
469 		entry = xas_load(&xas);
470 		if (dax_is_locked(entry)) {
471 			rcu_read_unlock();
472 			wait_entry_unlocked(&xas, entry);
473 			rcu_read_lock();
474 			continue;
475 		}
476 		dax_lock_entry(&xas, entry);
477 		xas_unlock_irq(&xas);
478 		break;
479 	}
480 	rcu_read_unlock();
481 	return (dax_entry_t)entry;
482 }
483 
484 void dax_unlock_page(struct page *page, dax_entry_t cookie)
485 {
486 	struct address_space *mapping = page->mapping;
487 	XA_STATE(xas, &mapping->i_pages, page->index);
488 
489 	if (S_ISCHR(mapping->host->i_mode))
490 		return;
491 
492 	dax_unlock_entry(&xas, (void *)cookie);
493 }
494 
495 /*
496  * dax_lock_mapping_entry - Lock the DAX entry corresponding to a mapping
497  * @mapping: the file's mapping whose entry we want to lock
498  * @index: the offset within this file
499  * @page: output the dax page corresponding to this dax entry
500  *
501  * Return: A cookie to pass to dax_unlock_mapping_entry() or 0 if the entry
502  * could not be locked.
503  */
504 dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, pgoff_t index,
505 		struct page **page)
506 {
507 	XA_STATE(xas, NULL, 0);
508 	void *entry;
509 
510 	rcu_read_lock();
511 	for (;;) {
512 		entry = NULL;
513 		if (!dax_mapping(mapping))
514 			break;
515 
516 		xas.xa = &mapping->i_pages;
517 		xas_lock_irq(&xas);
518 		xas_set(&xas, index);
519 		entry = xas_load(&xas);
520 		if (dax_is_locked(entry)) {
521 			rcu_read_unlock();
522 			wait_entry_unlocked(&xas, entry);
523 			rcu_read_lock();
524 			continue;
525 		}
526 		if (!entry ||
527 		    dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
528 			/*
529 			 * We looked the entry up via the file's mapping and
530 			 * index, so it may not have been inserted yet, or it
531 			 * may be a zero/empty entry.  Neither case is an
532 			 * error, so return a special cookie and do not
533 			 * output @page.
534 			 */
535 			entry = (void *)~0UL;
536 		} else {
537 			*page = pfn_to_page(dax_to_pfn(entry));
538 			dax_lock_entry(&xas, entry);
539 		}
540 		xas_unlock_irq(&xas);
541 		break;
542 	}
543 	rcu_read_unlock();
544 	return (dax_entry_t)entry;
545 }
546 
547 void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index,
548 		dax_entry_t cookie)
549 {
550 	XA_STATE(xas, &mapping->i_pages, index);
551 
552 	if (cookie == ~0UL)
553 		return;
554 
555 	dax_unlock_entry(&xas, (void *)cookie);
556 }
557 
558 /*
559  * Find page cache entry at given index. If it is a DAX entry, return it
560  * with the entry locked. If the page cache doesn't contain an entry at
561  * that index, add a locked empty entry.
562  *
563  * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
564  * either return that locked entry or will return VM_FAULT_FALLBACK.
565  * This will happen if there are any PTE entries within the PMD range
566  * that we are requesting.
567  *
568  * We always favor PTE entries over PMD entries. There isn't a flow where we
569  * evict PTE entries in order to 'upgrade' them to a PMD entry.  A PMD
570  * insertion will fail if it finds any PTE entries already in the tree, and a
571  * PTE insertion will cause an existing PMD entry to be unmapped and
572  * downgraded to PTE entries.  This happens for both PMD zero pages and
573  * PMD empty entries.
574  *
575  * The exception to this downgrade path is for PMD entries that have
576  * real storage backing them.  We will leave these real PMD entries in
577  * the tree, and PTE writes will simply dirty the entire PMD entry.
578  *
579  * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
580  * persistent memory the benefit is doubtful. We can add that later if we can
581  * show it helps.
582  *
583  * On error, this function does not return an ERR_PTR.  Instead it returns
584  * a VM_FAULT code, encoded as an xarray internal entry.  The ERR_PTR values
585  * overlap with xarray value entries.
586  */
587 static void *grab_mapping_entry(struct xa_state *xas,
588 		struct address_space *mapping, unsigned int order)
589 {
590 	unsigned long index = xas->xa_index;
591 	bool pmd_downgrade;	/* splitting PMD entry into PTE entries? */
592 	void *entry;
593 
594 retry:
595 	pmd_downgrade = false;
596 	xas_lock_irq(xas);
597 	entry = get_unlocked_entry(xas, order);
598 
599 	if (entry) {
600 		if (dax_is_conflict(entry))
601 			goto fallback;
602 		if (!xa_is_value(entry)) {
603 			xas_set_err(xas, -EIO);
604 			goto out_unlock;
605 		}
606 
607 		if (order == 0) {
608 			if (dax_is_pmd_entry(entry) &&
609 			    (dax_is_zero_entry(entry) ||
610 			     dax_is_empty_entry(entry))) {
611 				pmd_downgrade = true;
612 			}
613 		}
614 	}
615 
616 	if (pmd_downgrade) {
617 		/*
618 		 * Make sure 'entry' remains valid while we drop
619 		 * the i_pages lock.
620 		 */
621 		dax_lock_entry(xas, entry);
622 
623 		/*
624 		 * Besides huge zero pages the only other thing that gets
625 		 * downgraded are empty entries which don't need to be
626 		 * unmapped.
627 		 */
628 		if (dax_is_zero_entry(entry)) {
629 			xas_unlock_irq(xas);
630 			unmap_mapping_pages(mapping,
631 					xas->xa_index & ~PG_PMD_COLOUR,
632 					PG_PMD_NR, false);
633 			xas_reset(xas);
634 			xas_lock_irq(xas);
635 		}
636 
637 		dax_disassociate_entry(entry, mapping, false);
638 		xas_store(xas, NULL);	/* undo the PMD join */
639 		dax_wake_entry(xas, entry, WAKE_ALL);
640 		mapping->nrpages -= PG_PMD_NR;
641 		entry = NULL;
642 		xas_set(xas, index);
643 	}
644 
645 	if (entry) {
646 		dax_lock_entry(xas, entry);
647 	} else {
648 		unsigned long flags = DAX_EMPTY;
649 
650 		if (order > 0)
651 			flags |= DAX_PMD;
652 		entry = dax_make_entry(pfn_to_pfn_t(0), flags);
653 		dax_lock_entry(xas, entry);
654 		if (xas_error(xas))
655 			goto out_unlock;
656 		mapping->nrpages += 1UL << order;
657 	}
658 
659 out_unlock:
660 	xas_unlock_irq(xas);
661 	if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
662 		goto retry;
663 	if (xas->xa_node == XA_ERROR(-ENOMEM))
664 		return xa_mk_internal(VM_FAULT_OOM);
665 	if (xas_error(xas))
666 		return xa_mk_internal(VM_FAULT_SIGBUS);
667 	return entry;
668 fallback:
669 	xas_unlock_irq(xas);
670 	return xa_mk_internal(VM_FAULT_FALLBACK);
671 }
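
/*
 * Usage sketch: callers test the return value with xa_is_internal() and,
 * if set, convert it back to a vm_fault_t with xa_to_internal() (see
 * dax_iomap_pte_fault() below).  Otherwise they own a locked entry that
 * must eventually be released with dax_unlock_entry().
 */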
672 
673 /**
674  * dax_layout_busy_page_range - find first pinned page in @mapping
675  * @mapping: address space to scan for a page with ref count > 1
676  * @start: Starting offset. Page containing 'start' is included.
677  * @end: End offset. Page containing 'end' is included. If 'end' is LLONG_MAX,
678  *       pages from 'start' till the end of file are included.
679  *
680  * DAX requires ZONE_DEVICE mapped pages. These pages are never
681  * 'onlined' to the page allocator so they are considered idle when
682  * page->count == 1. A filesystem uses this interface to determine if
683  * any page in the mapping is busy, i.e. for DMA, or other
684  * get_user_pages() usages.
685  *
686  * It is expected that the filesystem is holding locks to block the
687  * establishment of new mappings in this address_space. I.e. it expects
688  * to be able to run unmap_mapping_range() and subsequently not race
689  * mapping_mapped() becoming true.
690  */
691 struct page *dax_layout_busy_page_range(struct address_space *mapping,
692 					loff_t start, loff_t end)
693 {
694 	void *entry;
695 	unsigned int scanned = 0;
696 	struct page *page = NULL;
697 	pgoff_t start_idx = start >> PAGE_SHIFT;
698 	pgoff_t end_idx;
699 	XA_STATE(xas, &mapping->i_pages, start_idx);
700 
701 	/*
702 	 * In the 'limited' case get_user_pages() for dax is disabled.
703 	 */
704 	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
705 		return NULL;
706 
707 	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
708 		return NULL;
709 
710 	/* If end == LLONG_MAX, include all pages from start to the end of file */
711 	if (end == LLONG_MAX)
712 		end_idx = ULONG_MAX;
713 	else
714 		end_idx = end >> PAGE_SHIFT;
715 	/*
716 	 * If we race get_user_pages_fast() here either we'll see the
717 	 * elevated page count in the iteration and wait, or
718 	 * get_user_pages_fast() will see that the page it took a reference
719 	 * against is no longer mapped in the page tables and bail to the
720 	 * get_user_pages() slow path.  The slow path is protected by
721 	 * pte_lock() and pmd_lock(). New references are not taken without
722 	 * holding those locks, and unmap_mapping_pages() will not zero the
723 	 * pte or pmd without holding the respective lock, so we are
724 	 * guaranteed to either see new references or prevent new
725 	 * references from being established.
726 	 */
727 	unmap_mapping_pages(mapping, start_idx, end_idx - start_idx + 1, 0);
728 
729 	xas_lock_irq(&xas);
730 	xas_for_each(&xas, entry, end_idx) {
731 		if (WARN_ON_ONCE(!xa_is_value(entry)))
732 			continue;
733 		if (unlikely(dax_is_locked(entry)))
734 			entry = get_unlocked_entry(&xas, 0);
735 		if (entry)
736 			page = dax_busy_page(entry);
737 		put_unlocked_entry(&xas, entry, WAKE_NEXT);
738 		if (page)
739 			break;
740 		if (++scanned % XA_CHECK_SCHED)
741 			continue;
742 
743 		xas_pause(&xas);
744 		xas_unlock_irq(&xas);
745 		cond_resched();
746 		xas_lock_irq(&xas);
747 	}
748 	xas_unlock_irq(&xas);
749 	return page;
750 }
751 EXPORT_SYMBOL_GPL(dax_layout_busy_page_range);
752 
753 struct page *dax_layout_busy_page(struct address_space *mapping)
754 {
755 	return dax_layout_busy_page_range(mapping, 0, LLONG_MAX);
756 }
757 EXPORT_SYMBOL_GPL(dax_layout_busy_page);
758 
759 static int __dax_invalidate_entry(struct address_space *mapping,
760 					  pgoff_t index, bool trunc)
761 {
762 	XA_STATE(xas, &mapping->i_pages, index);
763 	int ret = 0;
764 	void *entry;
765 
766 	xas_lock_irq(&xas);
767 	entry = get_unlocked_entry(&xas, 0);
768 	if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
769 		goto out;
770 	if (!trunc &&
771 	    (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
772 	     xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
773 		goto out;
774 	dax_disassociate_entry(entry, mapping, trunc);
775 	xas_store(&xas, NULL);
776 	mapping->nrpages -= 1UL << dax_entry_order(entry);
777 	ret = 1;
778 out:
779 	put_unlocked_entry(&xas, entry, WAKE_ALL);
780 	xas_unlock_irq(&xas);
781 	return ret;
782 }
783 
784 /*
785  * Delete DAX entry at @index from @mapping.  Wait for it
786  * to be unlocked before deleting it.
787  */
788 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
789 {
790 	int ret = __dax_invalidate_entry(mapping, index, true);
791 
792 	/*
793 	 * This gets called from truncate / punch_hole path. As such, the caller
794 	 * must hold locks protecting against concurrent modifications of the
795 	 * page cache (usually fs-private i_mmap_sem for writing). Since the
796 	 * caller has seen a DAX entry for this index, we better find it
797 	 * at that index as well...
798 	 */
799 	WARN_ON_ONCE(!ret);
800 	return ret;
801 }
802 
803 /*
804  * Invalidate DAX entry if it is clean.
805  */
806 int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
807 				      pgoff_t index)
808 {
809 	return __dax_invalidate_entry(mapping, index, false);
810 }
811 
812 static pgoff_t dax_iomap_pgoff(const struct iomap *iomap, loff_t pos)
813 {
814 	return PHYS_PFN(iomap->addr + (pos & PAGE_MASK) - iomap->offset);
815 }
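
/*
 * Worked example (illustrative, 4K pages): with iomap->addr == 0x10000000,
 * iomap->offset == 0x200000 and pos == 0x201234, the masked pos is
 * 0x201000, so the result is PHYS_PFN(0x10001000) == 0x10001.
 */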
816 
817 static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter)
818 {
819 	pgoff_t pgoff = dax_iomap_pgoff(&iter->iomap, iter->pos);
820 	void *vto, *kaddr;
821 	long rc;
822 	int id;
823 
824 	id = dax_read_lock();
825 	rc = dax_direct_access(iter->iomap.dax_dev, pgoff, 1, DAX_ACCESS,
826 				&kaddr, NULL);
827 	if (rc < 0) {
828 		dax_read_unlock(id);
829 		return rc;
830 	}
831 	vto = kmap_atomic(vmf->cow_page);
832 	copy_user_page(vto, kaddr, vmf->address, vmf->cow_page);
833 	kunmap_atomic(vto);
834 	dax_read_unlock(id);
835 	return 0;
836 }
837 
838 /*
839  * MAP_SYNC on a dax mapping guarantees dirty metadata is
840  * flushed on write-faults (non-cow), but not read-faults.
841  */
842 static bool dax_fault_is_synchronous(const struct iomap_iter *iter,
843 		struct vm_area_struct *vma)
844 {
845 	return (iter->flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC) &&
846 		(iter->iomap.flags & IOMAP_F_DIRTY);
847 }
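
/*
 * Example (illustrative): userspace requests such a mapping with
 * mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED_VALIDATE | MAP_SYNC,
 * fd, 0), which sets VM_SYNC on the resulting vma.
 */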
848 
849 /*
850  * By this point grab_mapping_entry() has ensured that we have a locked entry
851  * of the appropriate size so we don't have to worry about downgrading PMDs to
852  * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
853  * already in the tree, we will skip the insertion and just dirty the PMD as
854  * appropriate.
855  */
856 static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
857 		const struct iomap_iter *iter, void *entry, pfn_t pfn,
858 		unsigned long flags)
859 {
860 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
861 	void *new_entry = dax_make_entry(pfn, flags);
862 	bool write = iter->flags & IOMAP_WRITE;
863 	bool dirty = write && !dax_fault_is_synchronous(iter, vmf->vma);
864 	bool shared = iter->iomap.flags & IOMAP_F_SHARED;
865 
866 	if (dirty)
867 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
868 
869 	if (shared || (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE))) {
870 		unsigned long index = xas->xa_index;
871 		/* we are replacing a zero page with block mapping */
872 		if (dax_is_pmd_entry(entry))
873 			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
874 					PG_PMD_NR, false);
875 		else /* pte entry */
876 			unmap_mapping_pages(mapping, index, 1, false);
877 	}
878 
879 	xas_reset(xas);
880 	xas_lock_irq(xas);
881 	if (shared || dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
882 		void *old;
883 
884 		dax_disassociate_entry(entry, mapping, false);
885 		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address,
886 				shared);
887 		/*
888 		 * Only swap our new entry into the page cache if the current
889 		 * entry is a zero page or an empty entry.  If a normal PTE or
890 		 * PMD entry is already in the cache, we leave it alone.  This
891 		 * means that if we are trying to insert a PTE and the
892 		 * existing entry is a PMD, we will just leave the PMD in the
893 		 * tree and dirty it if necessary.
894 		 */
895 		old = dax_lock_entry(xas, new_entry);
896 		WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
897 					DAX_LOCKED));
898 		entry = new_entry;
899 	} else {
900 		xas_load(xas);	/* Walk the xa_state */
901 	}
902 
903 	if (dirty)
904 		xas_set_mark(xas, PAGECACHE_TAG_DIRTY);
905 
906 	if (write && shared)
907 		xas_set_mark(xas, PAGECACHE_TAG_TOWRITE);
908 
909 	xas_unlock_irq(xas);
910 	return entry;
911 }
912 
913 static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
914 		struct address_space *mapping, void *entry)
915 {
916 	unsigned long pfn, index, count, end;
917 	long ret = 0;
918 	struct vm_area_struct *vma;
919 
920 	/*
921 	 * A page got tagged dirty in DAX mapping? Something is seriously
922 	 * wrong.
923 	 */
924 	if (WARN_ON(!xa_is_value(entry)))
925 		return -EIO;
926 
927 	if (unlikely(dax_is_locked(entry))) {
928 		void *old_entry = entry;
929 
930 		entry = get_unlocked_entry(xas, 0);
931 
932 		/* Entry got punched out / reallocated? */
933 		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
934 			goto put_unlocked;
935 		/*
936 		 * Entry got reallocated elsewhere? No need to writeback.
937 		 * We have to compare pfns as we must not bail out due to
938 		 * difference in lockbit or entry type.
939 		 */
940 		if (dax_to_pfn(old_entry) != dax_to_pfn(entry))
941 			goto put_unlocked;
942 		if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
943 					dax_is_zero_entry(entry))) {
944 			ret = -EIO;
945 			goto put_unlocked;
946 		}
947 
948 		/* Another fsync thread may have already done this entry */
949 		if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
950 			goto put_unlocked;
951 	}
952 
953 	/* Lock the entry to serialize with page faults */
954 	dax_lock_entry(xas, entry);
955 
956 	/*
957 	 * We can clear the tag now but we have to be careful so that concurrent
958 	 * dax_writeback_one() calls for the same index cannot finish before we
959 	 * actually flush the caches. This is achieved as the calls will look
960 	 * at the entry only under the i_pages lock and once they do that
961 	 * they will see the entry locked and wait for it to unlock.
962 	 */
963 	xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
964 	xas_unlock_irq(xas);
965 
966 	/*
967 	 * If dax_writeback_mapping_range() was given a wbc->range_start
968 	 * in the middle of a PMD, the 'index' we use needs to be
969 	 * aligned to the start of the PMD.
970 	 * This allows us to flush for PMD_SIZE and not have to worry about
971 	 * partial PMD writebacks.
972 	 */
973 	pfn = dax_to_pfn(entry);
974 	count = 1UL << dax_entry_order(entry);
975 	index = xas->xa_index & ~(count - 1);
976 	end = index + count - 1;
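
	/*
	 * Example (illustrative): for a PMD entry found at xa_index 0x205,
	 * count is 512, so index becomes 0x200 and end 0x3ff; the whole 2M
	 * range is flushed even though the scan started mid-PMD.
	 */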
977 
978 	/* Walk all mappings of a given index of a file and writeprotect them */
979 	i_mmap_lock_read(mapping);
980 	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, end) {
981 		pfn_mkclean_range(pfn, count, index, vma);
982 		cond_resched();
983 	}
984 	i_mmap_unlock_read(mapping);
985 
986 	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
987 	/*
988 	 * After we have flushed the cache, we can clear the dirty tag. There
989 	 * cannot be new dirty data in the pfn after the flush has completed as
990 	 * the pfn mappings are writeprotected and fault waits for mapping
991 	 * entry lock.
992 	 */
993 	xas_reset(xas);
994 	xas_lock_irq(xas);
995 	xas_store(xas, entry);
996 	xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
997 	dax_wake_entry(xas, entry, WAKE_NEXT);
998 
999 	trace_dax_writeback_one(mapping->host, index, count);
1000 	return ret;
1001 
1002  put_unlocked:
1003 	put_unlocked_entry(xas, entry, WAKE_NEXT);
1004 	return ret;
1005 }
1006 
1007 /*
1008  * Flush the mapping to the persistent domain within the byte range of [start,
1009  * end]. This is required by data integrity operations to ensure file data is
1010  * on persistent storage prior to completion of the operation.
1011  */
1012 int dax_writeback_mapping_range(struct address_space *mapping,
1013 		struct dax_device *dax_dev, struct writeback_control *wbc)
1014 {
1015 	XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
1016 	struct inode *inode = mapping->host;
1017 	pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
1018 	void *entry;
1019 	int ret = 0;
1020 	unsigned int scanned = 0;
1021 
1022 	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
1023 		return -EIO;
1024 
1025 	if (mapping_empty(mapping) || wbc->sync_mode != WB_SYNC_ALL)
1026 		return 0;
1027 
1028 	trace_dax_writeback_range(inode, xas.xa_index, end_index);
1029 
1030 	tag_pages_for_writeback(mapping, xas.xa_index, end_index);
1031 
1032 	xas_lock_irq(&xas);
1033 	xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
1034 		ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
1035 		if (ret < 0) {
1036 			mapping_set_error(mapping, ret);
1037 			break;
1038 		}
1039 		if (++scanned % XA_CHECK_SCHED)
1040 			continue;
1041 
1042 		xas_pause(&xas);
1043 		xas_unlock_irq(&xas);
1044 		cond_resched();
1045 		xas_lock_irq(&xas);
1046 	}
1047 	xas_unlock_irq(&xas);
1048 	trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
1049 	return ret;
1050 }
1051 EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
1052 
1053 static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos,
1054 		size_t size, void **kaddr, pfn_t *pfnp)
1055 {
1056 	pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
1057 	int id, rc = 0;
1058 	long length;
1059 
1060 	id = dax_read_lock();
1061 	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
1062 				   DAX_ACCESS, kaddr, pfnp);
1063 	if (length < 0) {
1064 		rc = length;
1065 		goto out;
1066 	}
1067 	if (!pfnp)
1068 		goto out_check_addr;
1069 	rc = -EINVAL;
1070 	if (PFN_PHYS(length) < size)
1071 		goto out;
1072 	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
1073 		goto out;
1074 	/* For larger pages we need devmap */
1075 	if (length > 1 && !pfn_t_devmap(*pfnp))
1076 		goto out;
1077 	rc = 0;
1078 
1079 out_check_addr:
1080 	if (!kaddr)
1081 		goto out;
1082 	if (!*kaddr)
1083 		rc = -EFAULT;
1084 out:
1085 	dax_read_unlock(id);
1086 	return rc;
1087 }
1088 
1089 /**
1090  * dax_iomap_copy_around - Prepare for an unaligned write to a shared/cow page
1091  * by copying the data before and after the range to be written.
1092  * @pos:	address to do copy from.
1093  * @length:	size of copy operation.
1094  * @align_size:	aligned w.r.t align_size (either PMD_SIZE or PAGE_SIZE)
1095  * @srcmap:	iomap srcmap
1096  * @daddr:	destination address to copy to.
1097  *
1098  * This is called from two places: during a DAX write fault (page aligned),
1099  * to copy @length bytes of data to @daddr; or during a normal DAX write,
1100  * where dax_iomap_iter() calls it to copy around an unaligned start or end
1101  * address.  In the latter case, dax_iomap_iter() itself takes care of
1102  * copying the aligned part of the range.
1103  * If the srcmap contains invalid data, such as HOLE and UNWRITTEN, zero the
1104  * area to make sure no old data remains.
1105  */
1106 static int dax_iomap_copy_around(loff_t pos, uint64_t length, size_t align_size,
1107 		const struct iomap *srcmap, void *daddr)
1108 {
1109 	loff_t head_off = pos & (align_size - 1);
1110 	size_t size = ALIGN(head_off + length, align_size);
1111 	loff_t end = pos + length;
1112 	loff_t pg_end = round_up(end, align_size);
1113 	/* copy_all is usually true in the page fault case */
1114 	bool copy_all = head_off == 0 && end == pg_end;
1115 	/* zero the edges if srcmap is a HOLE or IOMAP_UNWRITTEN */
1116 	bool zero_edge = srcmap->flags & IOMAP_F_SHARED ||
1117 			 srcmap->type == IOMAP_UNWRITTEN;
1118 	void *saddr = NULL;
1119 	int ret = 0;
1120 
1121 	if (!zero_edge) {
1122 		ret = dax_iomap_direct_access(srcmap, pos, size, &saddr, NULL);
1123 		if (ret)
1124 			return ret;
1125 	}
1126 
1127 	if (copy_all) {
1128 		if (zero_edge)
1129 			memset(daddr, 0, size);
1130 		else
1131 			ret = copy_mc_to_kernel(daddr, saddr, length);
1132 		goto out;
1133 	}
1134 
1135 	/* Copy the head part of the range */
1136 	if (head_off) {
1137 		if (zero_edge)
1138 			memset(daddr, 0, head_off);
1139 		else {
1140 			ret = copy_mc_to_kernel(daddr, saddr, head_off);
1141 			if (ret)
1142 				return -EIO;
1143 		}
1144 	}
1145 
1146 	/* Copy the tail part of the range */
1147 	if (end < pg_end) {
1148 		loff_t tail_off = head_off + length;
1149 		loff_t tail_len = pg_end - end;
1150 
1151 		if (zero_edge)
1152 			memset(daddr + tail_off, 0, tail_len);
1153 		else {
1154 			ret = copy_mc_to_kernel(daddr + tail_off,
1155 						saddr + tail_off, tail_len);
1156 			if (ret)
1157 				return -EIO;
1158 		}
1159 	}
1160 out:
1161 	if (zero_edge)
1162 		dax_flush(srcmap->dax_dev, daddr, size);
1163 	return ret ? -EIO : 0;
1164 }
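
/*
 * Worked example (illustrative, 4K pages): an unaligned write of 0x200
 * bytes at pos 0x1100 gives head_off == 0x100, size == 0x1000 and
 * pg_end == 0x2000, so 0x100 head bytes and 0xd00 tail bytes are copied
 * (or zeroed) around the 0x200 bytes the caller writes itself.
 */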
1165 
1166 /*
1167  * The user has performed a load from a hole in the file.  Allocating a new
1168  * page in the file would cause excessive storage usage for workloads with
1169  * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
1170  * If this page is ever written to we will re-fault and change the mapping to
1171  * point to real DAX storage instead.
1172  */
1173 static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf,
1174 		const struct iomap_iter *iter, void **entry)
1175 {
1176 	struct inode *inode = iter->inode;
1177 	unsigned long vaddr = vmf->address;
1178 	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
1179 	vm_fault_t ret;
1180 
1181 	*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE);
1182 
1183 	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
1184 	trace_dax_load_hole(inode, vmf, ret);
1185 	return ret;
1186 }
1187 
1188 #ifdef CONFIG_FS_DAX_PMD
1189 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
1190 		const struct iomap_iter *iter, void **entry)
1191 {
1192 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1193 	unsigned long pmd_addr = vmf->address & PMD_MASK;
1194 	struct vm_area_struct *vma = vmf->vma;
1195 	struct inode *inode = mapping->host;
1196 	pgtable_t pgtable = NULL;
1197 	struct page *zero_page;
1198 	spinlock_t *ptl;
1199 	pmd_t pmd_entry;
1200 	pfn_t pfn;
1201 
1202 	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
1203 
1204 	if (unlikely(!zero_page))
1205 		goto fallback;
1206 
1207 	pfn = page_to_pfn_t(zero_page);
1208 	*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn,
1209 				  DAX_PMD | DAX_ZERO_PAGE);
1210 
1211 	if (arch_needs_pgtable_deposit()) {
1212 		pgtable = pte_alloc_one(vma->vm_mm);
1213 		if (!pgtable)
1214 			return VM_FAULT_OOM;
1215 	}
1216 
1217 	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1218 	if (!pmd_none(*(vmf->pmd))) {
1219 		spin_unlock(ptl);
1220 		goto fallback;
1221 	}
1222 
1223 	if (pgtable) {
1224 		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
1225 		mm_inc_nr_ptes(vma->vm_mm);
1226 	}
1227 	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
1228 	pmd_entry = pmd_mkhuge(pmd_entry);
1229 	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
1230 	spin_unlock(ptl);
1231 	trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
1232 	return VM_FAULT_NOPAGE;
1233 
1234 fallback:
1235 	if (pgtable)
1236 		pte_free(vma->vm_mm, pgtable);
1237 	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
1238 	return VM_FAULT_FALLBACK;
1239 }
1240 #else
1241 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
1242 		const struct iomap_iter *iter, void **entry)
1243 {
1244 	return VM_FAULT_FALLBACK;
1245 }
1246 #endif /* CONFIG_FS_DAX_PMD */
1247 
1248 static s64 dax_unshare_iter(struct iomap_iter *iter)
1249 {
1250 	struct iomap *iomap = &iter->iomap;
1251 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
1252 	loff_t pos = iter->pos;
1253 	loff_t length = iomap_length(iter);
1254 	int id = 0;
1255 	s64 ret = 0;
1256 	void *daddr = NULL, *saddr = NULL;
1257 
1258 	/* don't bother with blocks that are not shared to start with */
1259 	if (!(iomap->flags & IOMAP_F_SHARED))
1260 		return length;
1261 	/* don't bother with holes or unwritten extents */
1262 	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
1263 		return length;
1264 
1265 	id = dax_read_lock();
1266 	ret = dax_iomap_direct_access(iomap, pos, length, &daddr, NULL);
1267 	if (ret < 0)
1268 		goto out_unlock;
1269 
1270 	ret = dax_iomap_direct_access(srcmap, pos, length, &saddr, NULL);
1271 	if (ret < 0)
1272 		goto out_unlock;
1273 
1274 	if (copy_mc_to_kernel(daddr, saddr, length) == 0)
1275 		ret = length;
1276 	else
1277 		ret = -EIO;
1278 
1279 out_unlock:
1280 	dax_read_unlock(id);
1281 	return ret;
1282 }
1283 
1284 int dax_file_unshare(struct inode *inode, loff_t pos, loff_t len,
1285 		const struct iomap_ops *ops)
1286 {
1287 	struct iomap_iter iter = {
1288 		.inode		= inode,
1289 		.pos		= pos,
1290 		.len		= len,
1291 		.flags		= IOMAP_WRITE | IOMAP_UNSHARE | IOMAP_DAX,
1292 	};
1293 	int ret;
1294 
1295 	while ((ret = iomap_iter(&iter, ops)) > 0)
1296 		iter.processed = dax_unshare_iter(&iter);
1297 	return ret;
1298 }
1299 EXPORT_SYMBOL_GPL(dax_file_unshare);
1300 
1301 static int dax_memzero(struct iomap_iter *iter, loff_t pos, size_t size)
1302 {
1303 	const struct iomap *iomap = &iter->iomap;
1304 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
1305 	unsigned offset = offset_in_page(pos);
1306 	pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
1307 	void *kaddr;
1308 	long ret;
1309 
1310 	ret = dax_direct_access(iomap->dax_dev, pgoff, 1, DAX_ACCESS, &kaddr,
1311 				NULL);
1312 	if (ret < 0)
1313 		return ret;
1314 	memset(kaddr + offset, 0, size);
1315 	if (iomap->flags & IOMAP_F_SHARED)
1316 		ret = dax_iomap_copy_around(pos, size, PAGE_SIZE, srcmap,
1317 					    kaddr);
1318 	else
1319 		dax_flush(iomap->dax_dev, kaddr + offset, size);
1320 	return ret;
1321 }
1322 
1323 static s64 dax_zero_iter(struct iomap_iter *iter, bool *did_zero)
1324 {
1325 	const struct iomap *iomap = &iter->iomap;
1326 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
1327 	loff_t pos = iter->pos;
1328 	u64 length = iomap_length(iter);
1329 	s64 written = 0;
1330 
1331 	/* already zeroed?  we're done. */
1332 	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
1333 		return length;
1334 
1335 	/*
1336 	 * invalidate the pages whose sharing state is to be changed
1337 	 * because of CoW.
1338 	 */
1339 	if (iomap->flags & IOMAP_F_SHARED)
1340 		invalidate_inode_pages2_range(iter->inode->i_mapping,
1341 					      pos >> PAGE_SHIFT,
1342 					      (pos + length - 1) >> PAGE_SHIFT);
1343 
1344 	do {
1345 		unsigned offset = offset_in_page(pos);
1346 		unsigned size = min_t(u64, PAGE_SIZE - offset, length);
1347 		pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
1348 		long rc;
1349 		int id;
1350 
1351 		id = dax_read_lock();
1352 		if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE)
1353 			rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
1354 		else
1355 			rc = dax_memzero(iter, pos, size);
1356 		dax_read_unlock(id);
1357 
1358 		if (rc < 0)
1359 			return rc;
1360 		pos += size;
1361 		length -= size;
1362 		written += size;
1363 	} while (length > 0);
1364 
1365 	if (did_zero)
1366 		*did_zero = true;
1367 	return written;
1368 }
1369 
1370 int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
1371 		const struct iomap_ops *ops)
1372 {
1373 	struct iomap_iter iter = {
1374 		.inode		= inode,
1375 		.pos		= pos,
1376 		.len		= len,
1377 		.flags		= IOMAP_DAX | IOMAP_ZERO,
1378 	};
1379 	int ret;
1380 
1381 	while ((ret = iomap_iter(&iter, ops)) > 0)
1382 		iter.processed = dax_zero_iter(&iter, did_zero);
1383 	return ret;
1384 }
1385 EXPORT_SYMBOL_GPL(dax_zero_range);
1386 
1387 int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
1388 		const struct iomap_ops *ops)
1389 {
1390 	unsigned int blocksize = i_blocksize(inode);
1391 	unsigned int off = pos & (blocksize - 1);
1392 
1393 	/* Block boundary? Nothing to do */
1394 	if (!off)
1395 		return 0;
1396 	return dax_zero_range(inode, pos, blocksize - off, did_zero, ops);
1397 }
1398 EXPORT_SYMBOL_GPL(dax_truncate_page);
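
/*
 * Example (illustrative): with a 4K block size, truncating to pos 5000
 * leaves off == 904, so dax_truncate_page() zeroes the remaining 3192
 * bytes of that block via dax_zero_range().
 */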
1399 
1400 static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
1401 		struct iov_iter *iter)
1402 {
1403 	const struct iomap *iomap = &iomi->iomap;
1404 	const struct iomap *srcmap = iomap_iter_srcmap(iomi);
1405 	loff_t length = iomap_length(iomi);
1406 	loff_t pos = iomi->pos;
1407 	struct dax_device *dax_dev = iomap->dax_dev;
1408 	loff_t end = pos + length, done = 0;
1409 	bool write = iov_iter_rw(iter) == WRITE;
1410 	bool cow = write && iomap->flags & IOMAP_F_SHARED;
1411 	ssize_t ret = 0;
1412 	size_t xfer;
1413 	int id;
1414 
1415 	if (!write) {
1416 		end = min(end, i_size_read(iomi->inode));
1417 		if (pos >= end)
1418 			return 0;
1419 
1420 		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
1421 			return iov_iter_zero(min(length, end - pos), iter);
1422 	}
1423 
1424 	/*
1425 	 * In DAX mode, enforce either pure overwrites of written extents, or
1426 	 * writes to unwritten extents as part of a copy-on-write operation.
1427 	 */
1428 	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED &&
1429 			!(iomap->flags & IOMAP_F_SHARED)))
1430 		return -EIO;
1431 
1432 	/*
1433 	 * A write can allocate a block for an area which has a hole page mapped
1434 	 * into the page tables. We have to tear down these mappings so that data
1435 	 * written by write(2) is visible in mmap.
1436 	 */
1437 	if (iomap->flags & IOMAP_F_NEW || cow) {
1438 		invalidate_inode_pages2_range(iomi->inode->i_mapping,
1439 					      pos >> PAGE_SHIFT,
1440 					      (end - 1) >> PAGE_SHIFT);
1441 	}
1442 
1443 	id = dax_read_lock();
1444 	while (pos < end) {
1445 		unsigned offset = pos & (PAGE_SIZE - 1);
1446 		const size_t size = ALIGN(length + offset, PAGE_SIZE);
1447 		pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
1448 		ssize_t map_len;
1449 		bool recovery = false;
1450 		void *kaddr;
1451 
1452 		if (fatal_signal_pending(current)) {
1453 			ret = -EINTR;
1454 			break;
1455 		}
1456 
1457 		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
1458 				DAX_ACCESS, &kaddr, NULL);
1459 		if (map_len == -EIO && iov_iter_rw(iter) == WRITE) {
1460 			map_len = dax_direct_access(dax_dev, pgoff,
1461 					PHYS_PFN(size), DAX_RECOVERY_WRITE,
1462 					&kaddr, NULL);
1463 			if (map_len > 0)
1464 				recovery = true;
1465 		}
1466 		if (map_len < 0) {
1467 			ret = map_len;
1468 			break;
1469 		}
1470 
1471 		if (cow) {
1472 			ret = dax_iomap_copy_around(pos, length, PAGE_SIZE,
1473 						    srcmap, kaddr);
1474 			if (ret)
1475 				break;
1476 		}
1477 
1478 		map_len = PFN_PHYS(map_len);
1479 		kaddr += offset;
1480 		map_len -= offset;
1481 		if (map_len > end - pos)
1482 			map_len = end - pos;
1483 
1484 		if (recovery)
1485 			xfer = dax_recovery_write(dax_dev, pgoff, kaddr,
1486 					map_len, iter);
1487 		else if (write)
1488 			xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
1489 					map_len, iter);
1490 		else
1491 			xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
1492 					map_len, iter);
1493 
1494 		pos += xfer;
1495 		length -= xfer;
1496 		done += xfer;
1497 
1498 		if (xfer == 0)
1499 			ret = -EFAULT;
1500 		if (xfer < map_len)
1501 			break;
1502 	}
1503 	dax_read_unlock(id);
1504 
1505 	return done ? done : ret;
1506 }
1507 
1508 /**
1509  * dax_iomap_rw - Perform I/O to a DAX file
1510  * @iocb:	The control block for this I/O
1511  * @iter:	The addresses to do I/O from or to
1512  * @ops:	iomap ops passed from the file system
1513  *
1514  * This function performs read and write operations to directly mapped
1515  * persistent memory.  The caller needs to take care of read/write exclusion
1516  * and evicting any page cache pages in the region under I/O.
1517  */
1518 ssize_t
1519 dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
1520 		const struct iomap_ops *ops)
1521 {
1522 	struct iomap_iter iomi = {
1523 		.inode		= iocb->ki_filp->f_mapping->host,
1524 		.pos		= iocb->ki_pos,
1525 		.len		= iov_iter_count(iter),
1526 		.flags		= IOMAP_DAX,
1527 	};
1528 	loff_t done = 0;
1529 	int ret;
1530 
1531 	if (!iomi.len)
1532 		return 0;
1533 
1534 	if (iov_iter_rw(iter) == WRITE) {
1535 		lockdep_assert_held_write(&iomi.inode->i_rwsem);
1536 		iomi.flags |= IOMAP_WRITE;
1537 	} else {
1538 		lockdep_assert_held(&iomi.inode->i_rwsem);
1539 	}
1540 
1541 	if (iocb->ki_flags & IOCB_NOWAIT)
1542 		iomi.flags |= IOMAP_NOWAIT;
1543 
1544 	while ((ret = iomap_iter(&iomi, ops)) > 0)
1545 		iomi.processed = dax_iomap_iter(&iomi, iter);
1546 
1547 	done = iomi.pos - iocb->ki_pos;
1548 	iocb->ki_pos = iomi.pos;
1549 	return done ? done : ret;
1550 }
1551 EXPORT_SYMBOL_GPL(dax_iomap_rw);
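
/*
 * Usage sketch: a filesystem's ->read_iter/->write_iter checks
 * IS_DAX(inode) and, holding i_rwsem as asserted above, forwards to
 * dax_iomap_rw() with its own iomap_ops (ext4 and xfs both follow this
 * pattern).
 */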
1552 
1553 static vm_fault_t dax_fault_return(int error)
1554 {
1555 	if (error == 0)
1556 		return VM_FAULT_NOPAGE;
1557 	return vmf_error(error);
1558 }
1559 
1560 /*
1561  * When handling a synchronous page fault and the inode needs an fsync, we
1562  * can insert the PTE/PMD into the page tables only after that fsync has
1563  * happened. Skip the insertion for now and return the pfn so that the
1564  * caller can insert it after the fsync is done.
1565  */
1566 static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn)
1567 {
1568 	if (WARN_ON_ONCE(!pfnp))
1569 		return VM_FAULT_SIGBUS;
1570 	*pfnp = pfn;
1571 	return VM_FAULT_NEEDDSYNC;
1572 }
1573 
1574 static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf,
1575 		const struct iomap_iter *iter)
1576 {
1577 	vm_fault_t ret;
1578 	int error = 0;
1579 
1580 	switch (iter->iomap.type) {
1581 	case IOMAP_HOLE:
1582 	case IOMAP_UNWRITTEN:
1583 		clear_user_highpage(vmf->cow_page, vmf->address);
1584 		break;
1585 	case IOMAP_MAPPED:
1586 		error = copy_cow_page_dax(vmf, iter);
1587 		break;
1588 	default:
1589 		WARN_ON_ONCE(1);
1590 		error = -EIO;
1591 		break;
1592 	}
1593 
1594 	if (error)
1595 		return dax_fault_return(error);
1596 
1597 	__SetPageUptodate(vmf->cow_page);
1598 	ret = finish_fault(vmf);
1599 	if (!ret)
1600 		return VM_FAULT_DONE_COW;
1601 	return ret;
1602 }
1603 
1604 /**
1605  * dax_fault_iter - Common actor to handle pfn insertion in PTE/PMD fault.
1606  * @vmf:	vm fault instance
1607  * @iter:	iomap iter
1608  * @pfnp:	pfn to be returned
1609  * @xas:	the dax mapping tree of a file
1610  * @entry:	an unlocked dax entry to be inserted
1611  * @pmd:	distinguish whether it is a pmd fault
1612  */
1613 static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
1614 		const struct iomap_iter *iter, pfn_t *pfnp,
1615 		struct xa_state *xas, void **entry, bool pmd)
1616 {
1617 	const struct iomap *iomap = &iter->iomap;
1618 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
1619 	size_t size = pmd ? PMD_SIZE : PAGE_SIZE;
1620 	loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT;
1621 	bool write = iter->flags & IOMAP_WRITE;
1622 	unsigned long entry_flags = pmd ? DAX_PMD : 0;
1623 	int err = 0;
1624 	pfn_t pfn;
1625 	void *kaddr;
1626 
1627 	if (!pmd && vmf->cow_page)
1628 		return dax_fault_cow_page(vmf, iter);
1629 
1630 	/* if we are reading UNWRITTEN and HOLE, return a hole. */
1631 	if (!write &&
1632 	    (iomap->type == IOMAP_UNWRITTEN || iomap->type == IOMAP_HOLE)) {
1633 		if (!pmd)
1634 			return dax_load_hole(xas, vmf, iter, entry);
1635 		return dax_pmd_load_hole(xas, vmf, iter, entry);
1636 	}
1637 
1638 	if (iomap->type != IOMAP_MAPPED && !(iomap->flags & IOMAP_F_SHARED)) {
1639 		WARN_ON_ONCE(1);
1640 		return pmd ? VM_FAULT_FALLBACK : VM_FAULT_SIGBUS;
1641 	}
1642 
1643 	err = dax_iomap_direct_access(iomap, pos, size, &kaddr, &pfn);
1644 	if (err)
1645 		return pmd ? VM_FAULT_FALLBACK : dax_fault_return(err);
1646 
1647 	*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, entry_flags);
1648 
1649 	if (write && iomap->flags & IOMAP_F_SHARED) {
1650 		err = dax_iomap_copy_around(pos, size, size, srcmap, kaddr);
1651 		if (err)
1652 			return dax_fault_return(err);
1653 	}
1654 
1655 	if (dax_fault_is_synchronous(iter, vmf->vma))
1656 		return dax_fault_synchronous_pfnp(pfnp, pfn);
1657 
1658 	/* insert PMD pfn */
1659 	if (pmd)
1660 		return vmf_insert_pfn_pmd(vmf, pfn, write);
1661 
1662 	/* insert PTE pfn */
1663 	if (write)
1664 		return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
1665 	return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
1666 }
1667 
1668 static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
1669 			       int *iomap_errp, const struct iomap_ops *ops)
1670 {
1671 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1672 	XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
1673 	struct iomap_iter iter = {
1674 		.inode		= mapping->host,
1675 		.pos		= (loff_t)vmf->pgoff << PAGE_SHIFT,
1676 		.len		= PAGE_SIZE,
1677 		.flags		= IOMAP_DAX | IOMAP_FAULT,
1678 	};
1679 	vm_fault_t ret = 0;
1680 	void *entry;
1681 	int error;
1682 
1683 	trace_dax_pte_fault(iter.inode, vmf, ret);
1684 	/*
1685 	 * Check now whether the offset is beyond the end of the file. The
1686 	 * caller is supposed to hold locks serializing us with truncate /
1687 	 * punch hole, so this is a reliable test.
1688 	 */
1689 	if (iter.pos >= i_size_read(iter.inode)) {
1690 		ret = VM_FAULT_SIGBUS;
1691 		goto out;
1692 	}
1693 
1694 	if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
1695 		iter.flags |= IOMAP_WRITE;
1696 
1697 	entry = grab_mapping_entry(&xas, mapping, 0);
1698 	if (xa_is_internal(entry)) {
1699 		ret = xa_to_internal(entry);
1700 		goto out;
1701 	}
1702 
1703 	/*
1704 	 * It is possible, particularly with mixed reads & writes to private
1705 	 * mappings, that we have raced with a PMD fault that overlaps with
1706 	 * the PTE we need to set up.  If so just return and the fault will be
1707 	 * retried.
1708 	 */
1709 	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
1710 		ret = VM_FAULT_NOPAGE;
1711 		goto unlock_entry;
1712 	}
1713 
1714 	while ((error = iomap_iter(&iter, ops)) > 0) {
1715 		if (WARN_ON_ONCE(iomap_length(&iter) < PAGE_SIZE)) {
1716 			iter.processed = -EIO;	/* fs corruption? */
1717 			continue;
1718 		}
1719 
1720 		ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false);
1721 		if (ret != VM_FAULT_SIGBUS &&
1722 		    (iter.iomap.flags & IOMAP_F_NEW)) {
1723 			count_vm_event(PGMAJFAULT);
1724 			count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
1725 			ret |= VM_FAULT_MAJOR;
1726 		}
1727 
1728 		if (!(ret & VM_FAULT_ERROR))
1729 			iter.processed = PAGE_SIZE;
1730 	}
1731 
1732 	if (iomap_errp)
1733 		*iomap_errp = error;
1734 	if (!ret && error)
1735 		ret = dax_fault_return(error);
1736 
1737 unlock_entry:
1738 	dax_unlock_entry(&xas, entry);
1739 out:
1740 	trace_dax_pte_fault_done(iter.inode, vmf, ret);
1741 	return ret;
1742 }
1743 
1744 #ifdef CONFIG_FS_DAX_PMD
1745 static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas,
1746 		pgoff_t max_pgoff)
1747 {
1748 	unsigned long pmd_addr = vmf->address & PMD_MASK;
1749 	bool write = vmf->flags & FAULT_FLAG_WRITE;
1750 
1751 	/*
1752 	 * Make sure that the faulting address's PMD offset (color) matches
1753 	 * the PMD offset from the start of the file.  This is necessary so
1754 	 * that a PMD range in the page table overlaps exactly with a PMD
1755 	 * range in the page cache.
1756 	 */
1757 	if ((vmf->pgoff & PG_PMD_COLOUR) !=
1758 	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
1759 		return true;
1760 
1761 	/* Fall back to PTEs if we're going to COW */
1762 	if (write && !(vmf->vma->vm_flags & VM_SHARED))
1763 		return true;
1764 
1765 	/* If the PMD would extend outside the VMA */
1766 	if (pmd_addr < vmf->vma->vm_start)
1767 		return true;
1768 	if ((pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
1769 		return true;
1770 
1771 	/* If the PMD would extend beyond the file size */
1772 	if ((xas->xa_index | PG_PMD_COLOUR) >= max_pgoff)
1773 		return true;
1774 
1775 	return false;
1776 }
1777 
1778 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1779 			       const struct iomap_ops *ops)
1780 {
1781 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1782 	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
1783 	struct iomap_iter iter = {
1784 		.inode		= mapping->host,
1785 		.len		= PMD_SIZE,
1786 		.flags		= IOMAP_DAX | IOMAP_FAULT,
1787 	};
1788 	vm_fault_t ret = VM_FAULT_FALLBACK;
1789 	pgoff_t max_pgoff;
1790 	void *entry;
1791 	int error;
1792 
1793 	if (vmf->flags & FAULT_FLAG_WRITE)
1794 		iter.flags |= IOMAP_WRITE;
1795 
1796 	/*
1797 	 * Check now whether the offset is beyond the end of the file. The
1798 	 * caller is supposed to hold locks serializing us with truncate /
1799 	 * punch hole, so this is a reliable test.
1800 	 */
	max_pgoff = DIV_ROUND_UP(i_size_read(iter.inode), PAGE_SIZE);

	trace_dax_pmd_fault(iter.inode, vmf, max_pgoff, 0);

	if (xas.xa_index >= max_pgoff) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if (dax_fault_check_fallback(vmf, &xas, max_pgoff))
		goto fallback;

	/*
	 * grab_mapping_entry() will make sure we get an empty PMD entry,
	 * a zero PMD entry or a DAX PMD.  If it can't (because a PTE
	 * entry is already in the array, for instance), it will return
	 * an internal entry encoding VM_FAULT_FALLBACK.
	 */
	entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
	if (xa_is_internal(entry)) {
		ret = xa_to_internal(entry);
		goto fallback;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PTE fault that overlaps with
	 * the PMD we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
			!pmd_devmap(*vmf->pmd)) {
		ret = 0;
		goto unlock_entry;
	}

	iter.pos = (loff_t)xas.xa_index << PAGE_SHIFT;
	while ((error = iomap_iter(&iter, ops)) > 0) {
		if (iomap_length(&iter) < PMD_SIZE)
			continue; /* iter.processed stays 0, so the next
				   * iomap_iter() call returns 0 and this
				   * actually ends the loop */

		ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true);
		if (ret != VM_FAULT_FALLBACK)
			iter.processed = PMD_SIZE;
	}

unlock_entry:
	dax_unlock_entry(&xas, entry);
fallback:
	if (ret == VM_FAULT_FALLBACK) {
		split_huge_pmd(vmf->vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(iter.inode, vmf, max_pgoff, ret);
	return ret;
}
#else
static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */

/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vmf: The description of the fault
 * @pe_size: Size of the page to fault in
 * @pfnp: PFN to insert for synchronous faults if fsync is required
 * @iomap_errp: Storage for detailed error code in case of error
 * @ops: Iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in
 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
 * has done all the necessary locking for the page fault to proceed
 * successfully.
 */
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
{
	switch (pe_size) {
	case PE_SIZE_PTE:
		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
	case PE_SIZE_PMD:
		return dax_iomap_pmd_fault(vmf, pfnp, ops);
	default:
		return VM_FAULT_FALLBACK;
	}
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);
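
/*
 * Sketch of a caller, not part of this file: a filesystem would typically
 * wire dax_iomap_fault() into its vm_operations_struct roughly as below,
 * taking its own mapping lock to serialize against truncate before calling
 * in.  The "foo_" names and foo_iomap_ops are illustrative placeholders,
 * not a real API:
 *
 *	static vm_fault_t foo_filemap_huge_fault(struct vm_fault *vmf,
 *			enum page_entry_size pe_size)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		vm_fault_t ret;
 *		pfn_t pfn;
 *
 *		down_read(&foo_inode(inode)->mmap_lock);
 *		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
 *				      &foo_iomap_ops);
 *		up_read(&foo_inode(inode)->mmap_lock);
 *		return ret;
 *	}
 */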

/*
 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
 * @vmf: The description of the fault
 * @pfn: PFN to insert
 * @order: Order of entry to insert.
 *
 * This function inserts a writeable PTE or PMD entry into the page tables
 * for an mmapped DAX file.  It also marks the page cache entry as dirty.
 */
static vm_fault_t
dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
	void *entry;
	vm_fault_t ret;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas, order);
	/* Did we race with someone splitting the entry or similar? */
	if (!entry || dax_is_conflict(entry) ||
	    (order == 0 && !dax_is_pte_entry(entry))) {
		put_unlocked_entry(&xas, entry, WAKE_NEXT);
		xas_unlock_irq(&xas);
		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
						      VM_FAULT_NOPAGE);
		return VM_FAULT_NOPAGE;
	}
	xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
	dax_lock_entry(&xas, entry);
	xas_unlock_irq(&xas);
	if (order == 0)
		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
#ifdef CONFIG_FS_DAX_PMD
	else if (order == PMD_ORDER)
		ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
#endif
	else
		ret = VM_FAULT_FALLBACK;
	dax_unlock_entry(&xas, entry);
	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
	return ret;
}

/**
 * dax_finish_sync_fault - finish synchronous page fault
 * @vmf: The description of the fault
 * @pe_size: Size of entry to be inserted
 * @pfn: PFN to insert
 *
 * This function ensures that the file range touched by the page fault is
 * stored persistently on the media and handles insertion of the appropriate
 * page table entry.
 */
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size, pfn_t pfn)
{
	int err;
	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
	unsigned int order = pe_order(pe_size);
	size_t len = PAGE_SIZE << order;

	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
	if (err)
		return VM_FAULT_SIGBUS;
	return dax_insert_pfn_mkwrite(vmf, pfn, order);
}
EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
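
/*
 * Sketch of the synchronous fault protocol, for illustration only: when a
 * write fault hits a MAP_SYNC mapping over a dirty iomap (IOMAP_F_DIRTY),
 * dax_iomap_fault() stores the PFN through its @pfnp argument and returns
 * with VM_FAULT_NEEDDSYNC set instead of installing a writeable entry.
 * The filesystem then flushes the metadata and completes the fault:
 *
 *	ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &foo_iomap_ops);
 *	if (ret & VM_FAULT_NEEDDSYNC)
 *		ret = dax_finish_sync_fault(vmf, pe_size, pfn);
 *
 * (foo_iomap_ops is a placeholder for the filesystem's iomap ops.)
 */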

static loff_t dax_range_compare_iter(struct iomap_iter *it_src,
		struct iomap_iter *it_dest, u64 len, bool *same)
{
	const struct iomap *smap = &it_src->iomap;
	const struct iomap *dmap = &it_dest->iomap;
	loff_t pos1 = it_src->pos, pos2 = it_dest->pos;
	void *saddr, *daddr;
	int id, ret;

	len = min(len, min(smap->length, dmap->length));

	if (smap->type == IOMAP_HOLE && dmap->type == IOMAP_HOLE) {
		*same = true;
		return len;
	}

	if (smap->type == IOMAP_HOLE || dmap->type == IOMAP_HOLE) {
		*same = false;
		return 0;
	}

	id = dax_read_lock();
	ret = dax_iomap_direct_access(smap, pos1, ALIGN(pos1 + len, PAGE_SIZE),
				      &saddr, NULL);
	if (ret < 0)
		goto out_unlock;

	ret = dax_iomap_direct_access(dmap, pos2, ALIGN(pos2 + len, PAGE_SIZE),
				      &daddr, NULL);
	if (ret < 0)
		goto out_unlock;

	*same = !memcmp(saddr, daddr, len);
	if (!*same)
		len = 0;
	dax_read_unlock(id);
	return len;

out_unlock:
	dax_read_unlock(id);
	return -EIO;
}
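
/*
 * Return convention of the helper above, spelled out (illustrative):
 *
 *	compared = dax_range_compare_iter(&src_iter, &dst_iter, len, &same);
 *	// compared > 0:  that many bytes were verified equal (or both
 *	//                ranges are holes); the caller records them as
 *	//                processed and keeps iterating
 *	// compared == 0: the ranges differ; *same is false and returning 0
 *	//                as 'processed' terminates the iomap_iter() loop
 *	// compared < 0:  dax_iomap_direct_access() failed, reported as -EIO
 */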

int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
		struct inode *dst, loff_t dstoff, loff_t len, bool *same,
		const struct iomap_ops *ops)
{
	struct iomap_iter src_iter = {
		.inode		= src,
		.pos		= srcoff,
		.len		= len,
		.flags		= IOMAP_DAX,
	};
	struct iomap_iter dst_iter = {
		.inode		= dst,
		.pos		= dstoff,
		.len		= len,
		.flags		= IOMAP_DAX,
	};
	int ret, compared = 0;

	while ((ret = iomap_iter(&src_iter, ops)) > 0 &&
	       (ret = iomap_iter(&dst_iter, ops)) > 0) {
		compared = dax_range_compare_iter(&src_iter, &dst_iter, len,
						  same);
		if (compared < 0)
			return ret;
		src_iter.processed = dst_iter.processed = compared;
	}
	return ret;
}
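
/*
 * For context, a sketch of the caller (which lives in fs/remap_range.c, not
 * here): __generic_remap_file_range_prep() picks this DAX-aware comparison
 * over the page-cache based one when deduplicating DAX files, roughly:
 *
 *	if (remap_flags & REMAP_FILE_DEDUP) {
 *		bool is_same = false;
 *
 *		if (!IS_DAX(inode_in))
 *			ret = vfs_dedupe_file_range_compare(...);
 *		else if (dax_read_ops)
 *			ret = dax_dedupe_file_range_compare(inode_in, pos_in,
 *					inode_out, pos_out, *len, &is_same,
 *					dax_read_ops);
 *		...
 *		if (!is_same)
 *			return -EBADE;
 *	}
 */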

int dax_remap_file_range_prep(struct file *file_in, loff_t pos_in,
			      struct file *file_out, loff_t pos_out,
			      loff_t *len, unsigned int remap_flags,
			      const struct iomap_ops *ops)
{
	return __generic_remap_file_range_prep(file_in, pos_in, file_out,
					       pos_out, len, remap_flags, ops);
}
EXPORT_SYMBOL_GPL(dax_remap_file_range_prep);
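
/*
 * Sketch of a caller, for illustration: a reflink-capable filesystem would
 * choose this helper over generic_remap_file_range_prep() for DAX inodes in
 * its ->remap_file_range() preparation, e.g. (xfs-like, names illustrative):
 *
 *	if (IS_DAX(inode_in))
 *		ret = dax_remap_file_range_prep(file_in, pos_in, file_out,
 *				pos_out, len, remap_flags,
 *				&foo_read_iomap_ops);
 *	else
 *		ret = generic_remap_file_range_prep(file_in, pos_in, file_out,
 *				pos_out, len, remap_flags);
 */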