xref: /linux/fs/dax.c (revision 22d55f02b8922a097cd4be1e2f131dfa7ef65901)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * fs/dax.c - Direct Access filesystem code
4  * Copyright (c) 2013-2014 Intel Corporation
5  * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
6  * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
7  */
8 
9 #include <linux/atomic.h>
10 #include <linux/blkdev.h>
11 #include <linux/buffer_head.h>
12 #include <linux/dax.h>
13 #include <linux/fs.h>
14 #include <linux/genhd.h>
15 #include <linux/highmem.h>
16 #include <linux/memcontrol.h>
17 #include <linux/mm.h>
18 #include <linux/mutex.h>
19 #include <linux/pagevec.h>
20 #include <linux/sched.h>
21 #include <linux/sched/signal.h>
22 #include <linux/uio.h>
23 #include <linux/vmstat.h>
24 #include <linux/pfn_t.h>
25 #include <linux/sizes.h>
26 #include <linux/mmu_notifier.h>
27 #include <linux/iomap.h>
28 #include <asm/pgalloc.h>
29 #include "internal.h"
30 
31 #define CREATE_TRACE_POINTS
32 #include <trace/events/fs_dax.h>
33 
34 static inline unsigned int pe_order(enum page_entry_size pe_size)
35 {
36 	if (pe_size == PE_SIZE_PTE)
37 		return PAGE_SHIFT - PAGE_SHIFT;
38 	if (pe_size == PE_SIZE_PMD)
39 		return PMD_SHIFT - PAGE_SHIFT;
40 	if (pe_size == PE_SIZE_PUD)
41 		return PUD_SHIFT - PAGE_SHIFT;
42 	return ~0;
43 }
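
/*
 * Example (assuming x86-64 with 4KiB pages, so PAGE_SHIFT == 12,
 * PMD_SHIFT == 21, PUD_SHIFT == 30): pe_order() returns 0 for PE_SIZE_PTE,
 * 9 for PE_SIZE_PMD and 18 for PE_SIZE_PUD; any unrecognised size yields ~0.
 */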
44 
45 /* We choose 4096 entries - same as per-zone page wait tables */
46 #define DAX_WAIT_TABLE_BITS 12
47 #define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
48 
49 /* The 'colour' (i.e. low bits) within a PMD of a page offset.  */
50 #define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
51 #define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)
52 
53 /* The order of a PMD entry */
54 #define PMD_ORDER	(PMD_SHIFT - PAGE_SHIFT)
55 
56 static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
57 
58 static int __init init_dax_wait_table(void)
59 {
60 	int i;
61 
62 	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
63 		init_waitqueue_head(wait_table + i);
64 	return 0;
65 }
66 fs_initcall(init_dax_wait_table);
67 
68 /*
69  * DAX pagecache entries use XArray value entries so they can't be mistaken
70  * for pages.  We use one bit for locking, one bit for the entry size (PMD)
71  * and two more to tell us if the entry is a zero page or an empty entry that
72  * is just used for locking.  In total four special bits.
73  *
74  * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
75  * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
76  * block allocation.
77  */
78 #define DAX_SHIFT	(4)
79 #define DAX_LOCKED	(1UL << 0)
80 #define DAX_PMD		(1UL << 1)
81 #define DAX_ZERO_PAGE	(1UL << 2)
82 #define DAX_EMPTY	(1UL << 3)
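
/*
 * Illustrative encoding using the helpers below: dax_make_entry(pfn, DAX_PMD)
 * for pfn 0x1234 produces the xarray value (0x1234 << DAX_SHIFT) | DAX_PMD,
 * i.e. 0x12342, and dax_to_pfn() shifts the four flag bits back out to
 * recover 0x1234.  DAX_LOCKED is never set by dax_make_entry(); it is ORed
 * in by dax_lock_entry() and cleared again when dax_unlock_entry() stores
 * the unlocked value.
 */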
83 
84 static unsigned long dax_to_pfn(void *entry)
85 {
86 	return xa_to_value(entry) >> DAX_SHIFT;
87 }
88 
89 static void *dax_make_entry(pfn_t pfn, unsigned long flags)
90 {
91 	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
92 }
93 
94 static bool dax_is_locked(void *entry)
95 {
96 	return xa_to_value(entry) & DAX_LOCKED;
97 }
98 
99 static unsigned int dax_entry_order(void *entry)
100 {
101 	if (xa_to_value(entry) & DAX_PMD)
102 		return PMD_ORDER;
103 	return 0;
104 }
105 
106 static unsigned long dax_is_pmd_entry(void *entry)
107 {
108 	return xa_to_value(entry) & DAX_PMD;
109 }
110 
111 static bool dax_is_pte_entry(void *entry)
112 {
113 	return !(xa_to_value(entry) & DAX_PMD);
114 }
115 
116 static int dax_is_zero_entry(void *entry)
117 {
118 	return xa_to_value(entry) & DAX_ZERO_PAGE;
119 }
120 
121 static int dax_is_empty_entry(void *entry)
122 {
123 	return xa_to_value(entry) & DAX_EMPTY;
124 }
125 
126 /*
127  * DAX page cache entry locking
128  */
129 struct exceptional_entry_key {
130 	struct xarray *xa;
131 	pgoff_t entry_start;
132 };
133 
134 struct wait_exceptional_entry_queue {
135 	wait_queue_entry_t wait;
136 	struct exceptional_entry_key key;
137 };
138 
139 static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
140 		void *entry, struct exceptional_entry_key *key)
141 {
142 	unsigned long hash;
143 	unsigned long index = xas->xa_index;
144 
145 	/*
146 	 * If 'entry' is a PMD, align the 'index' that we use for the wait
147 	 * queue to the start of that PMD.  This ensures that all offsets in
148 	 * the range covered by the PMD map to the same bit lock.
149 	 */
150 	if (dax_is_pmd_entry(entry))
151 		index &= ~PG_PMD_COLOUR;
152 	key->xa = xas->xa;
153 	key->entry_start = index;
154 
155 	hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
156 	return wait_table + hash;
157 }
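
/*
 * For example, with 4KiB pages and 2MiB PMDs (PG_PMD_COLOUR == 511), waiters
 * on a PMD entry covering page cache indices 0x200..0x3ff all use
 * entry_start 0x200, so every offset within that PMD hashes to the same
 * bucket of the 4096-entry wait_table above.
 */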
158 
159 static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
160 		unsigned int mode, int sync, void *keyp)
161 {
162 	struct exceptional_entry_key *key = keyp;
163 	struct wait_exceptional_entry_queue *ewait =
164 		container_of(wait, struct wait_exceptional_entry_queue, wait);
165 
166 	if (key->xa != ewait->key.xa ||
167 	    key->entry_start != ewait->key.entry_start)
168 		return 0;
169 	return autoremove_wake_function(wait, mode, sync, NULL);
170 }
171 
172 /*
173  * @entry may no longer be the entry at the index in the mapping.
174  * The important information it's conveying is whether the entry at
175  * this index used to be a PMD entry.
176  */
177 static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
178 {
179 	struct exceptional_entry_key key;
180 	wait_queue_head_t *wq;
181 
182 	wq = dax_entry_waitqueue(xas, entry, &key);
183 
184 	/*
185 	 * Checking for locked entry and prepare_to_wait_exclusive() happens
186 	 * under the i_pages lock, ditto for entry handling in our callers.
187 	 * So at this point all tasks that could have seen our entry locked
188 	 * must be in the waitqueue and the following check will see them.
189 	 */
190 	if (waitqueue_active(wq))
191 		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
192 }
193 
194 /*
195  * Look up entry in page cache, wait for it to become unlocked if it
196  * is a DAX entry and return it.  The caller must subsequently call
197  * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
198  * if it did.
199  *
200  * Must be called with the i_pages lock held.
201  */
202 static void *get_unlocked_entry(struct xa_state *xas)
203 {
204 	void *entry;
205 	struct wait_exceptional_entry_queue ewait;
206 	wait_queue_head_t *wq;
207 
208 	init_wait(&ewait.wait);
209 	ewait.wait.func = wake_exceptional_entry_func;
210 
211 	for (;;) {
212 		entry = xas_find_conflict(xas);
213 		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)) ||
214 				!dax_is_locked(entry))
215 			return entry;
216 
217 		wq = dax_entry_waitqueue(xas, entry, &ewait.key);
218 		prepare_to_wait_exclusive(wq, &ewait.wait,
219 					  TASK_UNINTERRUPTIBLE);
220 		xas_unlock_irq(xas);
221 		xas_reset(xas);
222 		schedule();
223 		finish_wait(wq, &ewait.wait);
224 		xas_lock_irq(xas);
225 	}
226 }
227 
228 /*
229  * The only thing keeping the address space around is the i_pages lock
230  * (it's cycled in clear_inode() after removing the entries from i_pages).
231  * After we call xas_unlock_irq(), we cannot touch xas->xa.
232  */
233 static void wait_entry_unlocked(struct xa_state *xas, void *entry)
234 {
235 	struct wait_exceptional_entry_queue ewait;
236 	wait_queue_head_t *wq;
237 
238 	init_wait(&ewait.wait);
239 	ewait.wait.func = wake_exceptional_entry_func;
240 
241 	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
242 	/*
243 	 * Unlike get_unlocked_entry() there is no guarantee that this
244 	 * path ever successfully retrieves an unlocked entry before an
245 	 * inode dies. Perform a non-exclusive wait in case this path
246 	 * never successfully performs its own wake up.
247 	 */
248 	prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
249 	xas_unlock_irq(xas);
250 	schedule();
251 	finish_wait(wq, &ewait.wait);
252 }
253 
254 static void put_unlocked_entry(struct xa_state *xas, void *entry)
255 {
256 	/* If we were the only waiter woken, wake the next one */
257 	if (entry)
258 		dax_wake_entry(xas, entry, false);
259 }
260 
261 /*
262  * We used the xa_state to get the entry, but then we locked the entry and
263  * dropped the xa_lock, so we know the xa_state is stale and must be reset
264  * before use.
265  */
266 static void dax_unlock_entry(struct xa_state *xas, void *entry)
267 {
268 	void *old;
269 
270 	BUG_ON(dax_is_locked(entry));
271 	xas_reset(xas);
272 	xas_lock_irq(xas);
273 	old = xas_store(xas, entry);
274 	xas_unlock_irq(xas);
275 	BUG_ON(!dax_is_locked(old));
276 	dax_wake_entry(xas, entry, false);
277 }
278 
279 /*
280  * Return: The entry stored at this location before it was locked.
281  */
282 static void *dax_lock_entry(struct xa_state *xas, void *entry)
283 {
284 	unsigned long v = xa_to_value(entry);
285 	return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
286 }
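
/*
 * The usual pattern for these helpers, a simplified sketch of what
 * grab_mapping_entry() and dax_writeback_one() below do:
 *
 *	xas_lock_irq(&xas);
 *	entry = get_unlocked_entry(&xas);	- sleeps while DAX_LOCKED is set
 *	dax_lock_entry(&xas, entry);		- sets DAX_LOCKED
 *	xas_unlock_irq(&xas);
 *	... work that must not see the entry change ...
 *	dax_unlock_entry(&xas, entry);		- stores the unlocked value and
 *						  wakes one waiter
 */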
287 
288 static unsigned long dax_entry_size(void *entry)
289 {
290 	if (dax_is_zero_entry(entry))
291 		return 0;
292 	else if (dax_is_empty_entry(entry))
293 		return 0;
294 	else if (dax_is_pmd_entry(entry))
295 		return PMD_SIZE;
296 	else
297 		return PAGE_SIZE;
298 }
299 
300 static unsigned long dax_end_pfn(void *entry)
301 {
302 	return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
303 }
304 
305 /*
306  * Iterate through all mapped pfns represented by an entry, i.e. skip
307  * 'empty' and 'zero' entries.
308  */
309 #define for_each_mapped_pfn(entry, pfn) \
310 	for (pfn = dax_to_pfn(entry); \
311 			pfn < dax_end_pfn(entry); pfn++)
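
/*
 * Example: for a PMD entry (2MiB with 4KiB pages) whose first pfn is 0x1000,
 * this walks pfns 0x1000..0x11ff (512 pages).  For zero and empty entries
 * dax_entry_size() is 0, so dax_end_pfn() equals dax_to_pfn() and the loop
 * body never runs.
 */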
312 
313 /*
314  * TODO: for reflink+dax we need a way to associate a single page with
315  * multiple address_space instances at different linear_page_index()
316  * offsets.
317  */
318 static void dax_associate_entry(void *entry, struct address_space *mapping,
319 		struct vm_area_struct *vma, unsigned long address)
320 {
321 	unsigned long size = dax_entry_size(entry), pfn, index;
322 	int i = 0;
323 
324 	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
325 		return;
326 
327 	index = linear_page_index(vma, address & ~(size - 1));
328 	for_each_mapped_pfn(entry, pfn) {
329 		struct page *page = pfn_to_page(pfn);
330 
331 		WARN_ON_ONCE(page->mapping);
332 		page->mapping = mapping;
333 		page->index = index + i++;
334 	}
335 }
336 
337 static void dax_disassociate_entry(void *entry, struct address_space *mapping,
338 		bool trunc)
339 {
340 	unsigned long pfn;
341 
342 	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
343 		return;
344 
345 	for_each_mapped_pfn(entry, pfn) {
346 		struct page *page = pfn_to_page(pfn);
347 
348 		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
349 		WARN_ON_ONCE(page->mapping && page->mapping != mapping);
350 		page->mapping = NULL;
351 		page->index = 0;
352 	}
353 }
354 
355 static struct page *dax_busy_page(void *entry)
356 {
357 	unsigned long pfn;
358 
359 	for_each_mapped_pfn(entry, pfn) {
360 		struct page *page = pfn_to_page(pfn);
361 
362 		if (page_ref_count(page) > 1)
363 			return page;
364 	}
365 	return NULL;
366 }
367 
368 /*
369  * dax_lock_page - Lock the DAX entry corresponding to a page
370  * @page: The page whose entry we want to lock
371  *
372  * Context: Process context.
373  * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
374  * not be locked.
375  */
376 dax_entry_t dax_lock_page(struct page *page)
377 {
378 	XA_STATE(xas, NULL, 0);
379 	void *entry;
380 
381 	/* Ensure page->mapping isn't freed while we look at it */
382 	rcu_read_lock();
383 	for (;;) {
384 		struct address_space *mapping = READ_ONCE(page->mapping);
385 
386 		entry = NULL;
387 		if (!mapping || !dax_mapping(mapping))
388 			break;
389 
390 		/*
391 		 * In the device-dax case there's no need to lock, a
392 		 * struct dev_pagemap pin is sufficient to keep the
393 		 * inode alive, and we assume we have dev_pagemap pin
394 		 * otherwise we would not have a valid pfn_to_page()
395 		 * translation.
396 		 */
397 		entry = (void *)~0UL;
398 		if (S_ISCHR(mapping->host->i_mode))
399 			break;
400 
401 		xas.xa = &mapping->i_pages;
402 		xas_lock_irq(&xas);
403 		if (mapping != page->mapping) {
404 			xas_unlock_irq(&xas);
405 			continue;
406 		}
407 		xas_set(&xas, page->index);
408 		entry = xas_load(&xas);
409 		if (dax_is_locked(entry)) {
410 			rcu_read_unlock();
411 			wait_entry_unlocked(&xas, entry);
412 			rcu_read_lock();
413 			continue;
414 		}
415 		dax_lock_entry(&xas, entry);
416 		xas_unlock_irq(&xas);
417 		break;
418 	}
419 	rcu_read_unlock();
420 	return (dax_entry_t)entry;
421 }
422 
423 void dax_unlock_page(struct page *page, dax_entry_t cookie)
424 {
425 	struct address_space *mapping = page->mapping;
426 	XA_STATE(xas, &mapping->i_pages, page->index);
427 
428 	if (S_ISCHR(mapping->host->i_mode))
429 		return;
430 
431 	dax_unlock_entry(&xas, (void *)cookie);
432 }
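
/*
 * Sketch of how a caller (for instance the memory-failure path) is expected
 * to use this pair:
 *
 *	dax_entry_t cookie = dax_lock_page(page);
 *
 *	if (!cookie)
 *		return;			- entry could not be locked
 *	... inspect page->mapping / page->index while the entry is held ...
 *	dax_unlock_page(page, cookie);
 */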
433 
434 /*
435  * Find page cache entry at given index. If it is a DAX entry, return it
436  * with the entry locked. If the page cache doesn't contain an entry at
437  * that index, add a locked empty entry.
438  *
439  * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
440  * either return that locked entry or will return VM_FAULT_FALLBACK.
441  * This will happen if there are any PTE entries within the PMD range
442  * that we are requesting.
443  *
444  * We always favor PTE entries over PMD entries. There isn't a flow where we
445  * evict PTE entries in order to 'upgrade' them to a PMD entry.  A PMD
446  * insertion will fail if it finds any PTE entries already in the tree, and a
447  * PTE insertion will cause an existing PMD entry to be unmapped and
448  * downgraded to PTE entries.  This happens for both PMD zero pages as
449  * well as PMD empty entries.
450  *
451  * The exception to this downgrade path is for PMD entries that have
452  * real storage backing them.  We will leave these real PMD entries in
453  * the tree, and PTE writes will simply dirty the entire PMD entry.
454  *
455  * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
456  * persistent memory the benefit is doubtful. We can add that later if we can
457  * show it helps.
458  *
459  * On error, this function does not return an ERR_PTR.  Instead it returns
460  * a VM_FAULT code, encoded as an xarray internal entry.  The ERR_PTR values
461  * overlap with xarray value entries.
462  */
463 static void *grab_mapping_entry(struct xa_state *xas,
464 		struct address_space *mapping, unsigned long size_flag)
465 {
466 	unsigned long index = xas->xa_index;
467 	bool pmd_downgrade = false; /* splitting PMD entry into PTE entries? */
468 	void *entry;
469 
470 retry:
471 	xas_lock_irq(xas);
472 	entry = get_unlocked_entry(xas);
473 
474 	if (entry) {
475 		if (!xa_is_value(entry)) {
476 			xas_set_err(xas, EIO);
477 			goto out_unlock;
478 		}
479 
480 		if (size_flag & DAX_PMD) {
481 			if (dax_is_pte_entry(entry)) {
482 				put_unlocked_entry(xas, entry);
483 				goto fallback;
484 			}
485 		} else { /* trying to grab a PTE entry */
486 			if (dax_is_pmd_entry(entry) &&
487 			    (dax_is_zero_entry(entry) ||
488 			     dax_is_empty_entry(entry))) {
489 				pmd_downgrade = true;
490 			}
491 		}
492 	}
493 
494 	if (pmd_downgrade) {
495 		/*
496 		 * Make sure 'entry' remains valid while we drop
497 		 * the i_pages lock.
498 		 */
499 		dax_lock_entry(xas, entry);
500 
501 		/*
502 		 * Besides huge zero pages the only other thing that gets
503 		 * downgraded are empty entries which don't need to be
504 		 * unmapped.
505 		 */
506 		if (dax_is_zero_entry(entry)) {
507 			xas_unlock_irq(xas);
508 			unmap_mapping_pages(mapping,
509 					xas->xa_index & ~PG_PMD_COLOUR,
510 					PG_PMD_NR, false);
511 			xas_reset(xas);
512 			xas_lock_irq(xas);
513 		}
514 
515 		dax_disassociate_entry(entry, mapping, false);
516 		xas_store(xas, NULL);	/* undo the PMD join */
517 		dax_wake_entry(xas, entry, true);
518 		mapping->nrexceptional--;
519 		entry = NULL;
520 		xas_set(xas, index);
521 	}
522 
523 	if (entry) {
524 		dax_lock_entry(xas, entry);
525 	} else {
526 		entry = dax_make_entry(pfn_to_pfn_t(0), size_flag | DAX_EMPTY);
527 		dax_lock_entry(xas, entry);
528 		if (xas_error(xas))
529 			goto out_unlock;
530 		mapping->nrexceptional++;
531 	}
532 
533 out_unlock:
534 	xas_unlock_irq(xas);
535 	if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
536 		goto retry;
537 	if (xas->xa_node == XA_ERROR(-ENOMEM))
538 		return xa_mk_internal(VM_FAULT_OOM);
539 	if (xas_error(xas))
540 		return xa_mk_internal(VM_FAULT_SIGBUS);
541 	return entry;
542 fallback:
543 	xas_unlock_irq(xas);
544 	return xa_mk_internal(VM_FAULT_FALLBACK);
545 }
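
/*
 * Typical use, mirroring the fault handlers further down:
 *
 *	entry = grab_mapping_entry(&xas, mapping, 0);	- or DAX_PMD
 *	if (xa_is_internal(entry))
 *		return xa_to_internal(entry);		- a VM_FAULT_* code
 *	... install the mapping ...
 *	dax_unlock_entry(&xas, entry);
 */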
546 
547 /**
548  * dax_layout_busy_page - find first pinned page in @mapping
549  * @mapping: address space to scan for a page with ref count > 1
550  *
551  * DAX requires ZONE_DEVICE mapped pages. These pages are never
552  * 'onlined' to the page allocator so they are considered idle when
553  * page->count == 1. A filesystem uses this interface to determine if
554  * any page in the mapping is busy, i.e. for DMA, or other
555  * get_user_pages() usages.
556  *
557  * It is expected that the filesystem is holding locks to block the
558  * establishment of new mappings in this address_space. I.e. it expects
559  * to be able to run unmap_mapping_range() and subsequently not race
560  * mapping_mapped() becoming true.
561  */
562 struct page *dax_layout_busy_page(struct address_space *mapping)
563 {
564 	XA_STATE(xas, &mapping->i_pages, 0);
565 	void *entry;
566 	unsigned int scanned = 0;
567 	struct page *page = NULL;
568 
569 	/*
570 	 * In the 'limited' case get_user_pages() for dax is disabled.
571 	 */
572 	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
573 		return NULL;
574 
575 	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
576 		return NULL;
577 
578 	/*
579 	 * If we race get_user_pages_fast() here either we'll see the
580 	 * elevated page count in the iteration and wait, or
581 	 * get_user_pages_fast() will see that the page it took a reference
582 	 * against is no longer mapped in the page tables and bail to the
583 	 * get_user_pages() slow path.  The slow path is protected by
584 	 * pte_lock() and pmd_lock(). New references are not taken without
585 	 * holding those locks, and unmap_mapping_range() will not zero the
586 	 * pte or pmd without holding the respective lock, so we are
587 	 * guaranteed to either see new references or prevent new
588 	 * references from being established.
589 	 */
590 	unmap_mapping_range(mapping, 0, 0, 1);
591 
592 	xas_lock_irq(&xas);
593 	xas_for_each(&xas, entry, ULONG_MAX) {
594 		if (WARN_ON_ONCE(!xa_is_value(entry)))
595 			continue;
596 		if (unlikely(dax_is_locked(entry)))
597 			entry = get_unlocked_entry(&xas);
598 		if (entry)
599 			page = dax_busy_page(entry);
600 		put_unlocked_entry(&xas, entry);
601 		if (page)
602 			break;
603 		if (++scanned % XA_CHECK_SCHED)
604 			continue;
605 
606 		xas_pause(&xas);
607 		xas_unlock_irq(&xas);
608 		cond_resched();
609 		xas_lock_irq(&xas);
610 	}
611 	xas_unlock_irq(&xas);
612 	return page;
613 }
614 EXPORT_SYMBOL_GPL(dax_layout_busy_page);
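
/*
 * A filesystem typically calls this in a loop from its truncate/hole-punch
 * "break layouts" path, while holding the locks that block new mappings,
 * along the lines of:
 *
 *	page = dax_layout_busy_page(mapping);
 *	if (page) {
 *		... wait for the extra page references (DMA etc.) to be
 *		    dropped, then repeat the scan ...
 *	}
 */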
615 
616 static int __dax_invalidate_entry(struct address_space *mapping,
617 					  pgoff_t index, bool trunc)
618 {
619 	XA_STATE(xas, &mapping->i_pages, index);
620 	int ret = 0;
621 	void *entry;
622 
623 	xas_lock_irq(&xas);
624 	entry = get_unlocked_entry(&xas);
625 	if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
626 		goto out;
627 	if (!trunc &&
628 	    (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
629 	     xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
630 		goto out;
631 	dax_disassociate_entry(entry, mapping, trunc);
632 	xas_store(&xas, NULL);
633 	mapping->nrexceptional--;
634 	ret = 1;
635 out:
636 	put_unlocked_entry(&xas, entry);
637 	xas_unlock_irq(&xas);
638 	return ret;
639 }
640 
641 /*
642  * Delete DAX entry at @index from @mapping.  Wait for it
643  * to be unlocked before deleting it.
644  */
645 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
646 {
647 	int ret = __dax_invalidate_entry(mapping, index, true);
648 
649 	/*
650 	 * This gets called from truncate / punch_hole path. As such, the caller
651 	 * must hold locks protecting against concurrent modifications of the
652 	 * page cache (usually fs-private i_mmap_sem for writing). Since the
653 	 * caller has seen a DAX entry for this index, we better find it
654 	 * at that index as well...
655 	 */
656 	WARN_ON_ONCE(!ret);
657 	return ret;
658 }
659 
660 /*
661  * Invalidate DAX entry if it is clean.
662  */
663 int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
664 				      pgoff_t index)
665 {
666 	return __dax_invalidate_entry(mapping, index, false);
667 }
668 
669 static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
670 		sector_t sector, size_t size, struct page *to,
671 		unsigned long vaddr)
672 {
673 	void *vto, *kaddr;
674 	pgoff_t pgoff;
675 	long rc;
676 	int id;
677 
678 	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
679 	if (rc)
680 		return rc;
681 
682 	id = dax_read_lock();
683 	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, NULL);
684 	if (rc < 0) {
685 		dax_read_unlock(id);
686 		return rc;
687 	}
688 	vto = kmap_atomic(to);
689 	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
690 	kunmap_atomic(vto);
691 	dax_read_unlock(id);
692 	return 0;
693 }
694 
695 /*
696  * By this point grab_mapping_entry() has ensured that we have a locked entry
697  * of the appropriate size so we don't have to worry about downgrading PMDs to
698  * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
699  * already in the tree, we will skip the insertion and just dirty the PMD as
700  * appropriate.
701  */
702 static void *dax_insert_entry(struct xa_state *xas,
703 		struct address_space *mapping, struct vm_fault *vmf,
704 		void *entry, pfn_t pfn, unsigned long flags, bool dirty)
705 {
706 	void *new_entry = dax_make_entry(pfn, flags);
707 
708 	if (dirty)
709 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
710 
711 	if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) {
712 		unsigned long index = xas->xa_index;
713 		/* we are replacing a zero page with block mapping */
714 		if (dax_is_pmd_entry(entry))
715 			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
716 					PG_PMD_NR, false);
717 		else /* pte entry */
718 			unmap_mapping_pages(mapping, index, 1, false);
719 	}
720 
721 	xas_reset(xas);
722 	xas_lock_irq(xas);
723 	if (dax_entry_size(entry) != dax_entry_size(new_entry)) {
724 		dax_disassociate_entry(entry, mapping, false);
725 		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
726 	}
727 
728 	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
729 		/*
730 		 * Only swap our new entry into the page cache if the current
731 		 * entry is a zero page or an empty entry.  If a normal PTE or
732 		 * PMD entry is already in the cache, we leave it alone.  This
733 		 * means that if we are trying to insert a PTE and the
734 		 * existing entry is a PMD, we will just leave the PMD in the
735 		 * tree and dirty it if necessary.
736 		 */
737 		void *old = dax_lock_entry(xas, new_entry);
738 		WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
739 					DAX_LOCKED));
740 		entry = new_entry;
741 	} else {
742 		xas_load(xas);	/* Walk the xa_state */
743 	}
744 
745 	if (dirty)
746 		xas_set_mark(xas, PAGECACHE_TAG_DIRTY);
747 
748 	xas_unlock_irq(xas);
749 	return entry;
750 }
751 
752 static inline
753 unsigned long pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
754 {
755 	unsigned long address;
756 
757 	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
758 	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
759 	return address;
760 }
761 
762 /* Walk all mappings of a given index of a file and writeprotect them */
763 static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
764 		unsigned long pfn)
765 {
766 	struct vm_area_struct *vma;
767 	pte_t pte, *ptep = NULL;
768 	pmd_t *pmdp = NULL;
769 	spinlock_t *ptl;
770 
771 	i_mmap_lock_read(mapping);
772 	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
773 		struct mmu_notifier_range range;
774 		unsigned long address;
775 
776 		cond_resched();
777 
778 		if (!(vma->vm_flags & VM_SHARED))
779 			continue;
780 
781 		address = pgoff_address(index, vma);
782 
783 		/*
784 		 * Note because we provide range to follow_pte_pmd it will
785 		 * call mmu_notifier_invalidate_range_start() on our behalf
786 		 * before taking any lock.
787 		 */
788 		if (follow_pte_pmd(vma->vm_mm, address, &range,
789 				   &ptep, &pmdp, &ptl))
790 			continue;
791 
792 		/*
793 		 * No need to call mmu_notifier_invalidate_range() as we are
794 		 * downgrading page table protection not changing it to point
795 		 * to a new page.
796 		 *
797 		 * See Documentation/vm/mmu_notifier.rst
798 		 */
799 		if (pmdp) {
800 #ifdef CONFIG_FS_DAX_PMD
801 			pmd_t pmd;
802 
803 			if (pfn != pmd_pfn(*pmdp))
804 				goto unlock_pmd;
805 			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
806 				goto unlock_pmd;
807 
808 			flush_cache_page(vma, address, pfn);
809 			pmd = pmdp_invalidate(vma, address, pmdp);
810 			pmd = pmd_wrprotect(pmd);
811 			pmd = pmd_mkclean(pmd);
812 			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
813 unlock_pmd:
814 #endif
815 			spin_unlock(ptl);
816 		} else {
817 			if (pfn != pte_pfn(*ptep))
818 				goto unlock_pte;
819 			if (!pte_dirty(*ptep) && !pte_write(*ptep))
820 				goto unlock_pte;
821 
822 			flush_cache_page(vma, address, pfn);
823 			pte = ptep_clear_flush(vma, address, ptep);
824 			pte = pte_wrprotect(pte);
825 			pte = pte_mkclean(pte);
826 			set_pte_at(vma->vm_mm, address, ptep, pte);
827 unlock_pte:
828 			pte_unmap_unlock(ptep, ptl);
829 		}
830 
831 		mmu_notifier_invalidate_range_end(&range);
832 	}
833 	i_mmap_unlock_read(mapping);
834 }
835 
836 static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
837 		struct address_space *mapping, void *entry)
838 {
839 	unsigned long pfn, index, count;
840 	long ret = 0;
841 
842 	/*
843 	 * A page got tagged dirty in DAX mapping? Something is seriously
844 	 * wrong.
845 	 */
846 	if (WARN_ON(!xa_is_value(entry)))
847 		return -EIO;
848 
849 	if (unlikely(dax_is_locked(entry))) {
850 		void *old_entry = entry;
851 
852 		entry = get_unlocked_entry(xas);
853 
854 		/* Entry got punched out / reallocated? */
855 		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
856 			goto put_unlocked;
857 		/*
858 		 * Entry got reallocated elsewhere? No need to writeback.
859 		 * We have to compare pfns as we must not bail out due to
860 		 * difference in lockbit or entry type.
861 		 */
862 		if (dax_to_pfn(old_entry) != dax_to_pfn(entry))
863 			goto put_unlocked;
864 		if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
865 					dax_is_zero_entry(entry))) {
866 			ret = -EIO;
867 			goto put_unlocked;
868 		}
869 
870 		/* Another fsync thread may have already done this entry */
871 		if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
872 			goto put_unlocked;
873 	}
874 
875 	/* Lock the entry to serialize with page faults */
876 	dax_lock_entry(xas, entry);
877 
878 	/*
879 	 * We can clear the tag now but we have to be careful so that concurrent
880 	 * dax_writeback_one() calls for the same index cannot finish before we
881 	 * actually flush the caches. This is achieved as the calls will look
882 	 * at the entry only under the i_pages lock and once they do that
883 	 * they will see the entry locked and wait for it to unlock.
884 	 */
885 	xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
886 	xas_unlock_irq(xas);
887 
888 	/*
889 	 * If dax_writeback_mapping_range() was given a wbc->range_start
890 	 * in the middle of a PMD, the 'index' we use needs to be
891 	 * aligned to the start of the PMD.
892 	 * This allows us to flush for PMD_SIZE and not have to worry about
893 	 * partial PMD writebacks.
894 	 */
895 	pfn = dax_to_pfn(entry);
896 	count = 1UL << dax_entry_order(entry);
897 	index = xas->xa_index & ~(count - 1);
898 
899 	dax_entry_mkclean(mapping, index, pfn);
900 	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
901 	/*
902 	 * After we have flushed the cache, we can clear the dirty tag. There
903 	 * cannot be new dirty data in the pfn after the flush has completed as
904 	 * the pfn mappings are writeprotected and fault waits for mapping
905 	 * entry lock.
906 	 */
907 	xas_reset(xas);
908 	xas_lock_irq(xas);
909 	xas_store(xas, entry);
910 	xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
911 	dax_wake_entry(xas, entry, false);
912 
913 	trace_dax_writeback_one(mapping->host, index, count);
914 	return ret;
915 
916  put_unlocked:
917 	put_unlocked_entry(xas, entry);
918 	return ret;
919 }
920 
921 /*
922  * Flush the mapping to the persistent domain within the byte range of [start,
923  * end]. This is required by data integrity operations to ensure file data is
924  * on persistent storage prior to completion of the operation.
925  */
926 int dax_writeback_mapping_range(struct address_space *mapping,
927 		struct block_device *bdev, struct writeback_control *wbc)
928 {
929 	XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
930 	struct inode *inode = mapping->host;
931 	pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
932 	struct dax_device *dax_dev;
933 	void *entry;
934 	int ret = 0;
935 	unsigned int scanned = 0;
936 
937 	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
938 		return -EIO;
939 
940 	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
941 		return 0;
942 
943 	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
944 	if (!dax_dev)
945 		return -EIO;
946 
947 	trace_dax_writeback_range(inode, xas.xa_index, end_index);
948 
949 	tag_pages_for_writeback(mapping, xas.xa_index, end_index);
950 
951 	xas_lock_irq(&xas);
952 	xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
953 		ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
954 		if (ret < 0) {
955 			mapping_set_error(mapping, ret);
956 			break;
957 		}
958 		if (++scanned % XA_CHECK_SCHED)
959 			continue;
960 
961 		xas_pause(&xas);
962 		xas_unlock_irq(&xas);
963 		cond_resched();
964 		xas_lock_irq(&xas);
965 	}
966 	xas_unlock_irq(&xas);
967 	put_dax(dax_dev);
968 	trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
969 	return ret;
970 }
971 EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
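
/*
 * A DAX-aware filesystem wires this up from its ->writepages method.  A
 * minimal sketch (example_dax_writepages and the s_bdev lookup are
 * illustrative only; real filesystems resolve their block device themselves):
 *
 *	static int example_dax_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		return dax_writeback_mapping_range(mapping,
 *				mapping->host->i_sb->s_bdev, wbc);
 *	}
 */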
972 
973 static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
974 {
975 	return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
976 }
977 
978 static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
979 			 pfn_t *pfnp)
980 {
981 	const sector_t sector = dax_iomap_sector(iomap, pos);
982 	pgoff_t pgoff;
983 	int id, rc;
984 	long length;
985 
986 	rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
987 	if (rc)
988 		return rc;
989 	id = dax_read_lock();
990 	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
991 				   NULL, pfnp);
992 	if (length < 0) {
993 		rc = length;
994 		goto out;
995 	}
996 	rc = -EINVAL;
997 	if (PFN_PHYS(length) < size)
998 		goto out;
999 	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
1000 		goto out;
1001 	/* For larger pages we need devmap */
1002 	if (length > 1 && !pfn_t_devmap(*pfnp))
1003 		goto out;
1004 	rc = 0;
1005 out:
1006 	dax_read_unlock(id);
1007 	return rc;
1008 }
1009 
1010 /*
1011  * The user has performed a load from a hole in the file.  Allocating a new
1012  * page in the file would cause excessive storage usage for workloads with
1013  * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
1014  * If this page is ever written to we will re-fault and change the mapping to
1015  * point to real DAX storage instead.
1016  */
1017 static vm_fault_t dax_load_hole(struct xa_state *xas,
1018 		struct address_space *mapping, void **entry,
1019 		struct vm_fault *vmf)
1020 {
1021 	struct inode *inode = mapping->host;
1022 	unsigned long vaddr = vmf->address;
1023 	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
1024 	vm_fault_t ret;
1025 
1026 	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
1027 			DAX_ZERO_PAGE, false);
1028 
1029 	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
1030 	trace_dax_load_hole(inode, vmf, ret);
1031 	return ret;
1032 }
1033 
1034 static bool dax_range_is_aligned(struct block_device *bdev,
1035 				 unsigned int offset, unsigned int length)
1036 {
1037 	unsigned short sector_size = bdev_logical_block_size(bdev);
1038 
1039 	if (!IS_ALIGNED(offset, sector_size))
1040 		return false;
1041 	if (!IS_ALIGNED(length, sector_size))
1042 		return false;
1043 
1044 	return true;
1045 }
1046 
1047 int __dax_zero_page_range(struct block_device *bdev,
1048 		struct dax_device *dax_dev, sector_t sector,
1049 		unsigned int offset, unsigned int size)
1050 {
1051 	if (dax_range_is_aligned(bdev, offset, size)) {
1052 		sector_t start_sector = sector + (offset >> 9);
1053 
1054 		return blkdev_issue_zeroout(bdev, start_sector,
1055 				size >> 9, GFP_NOFS, 0);
1056 	} else {
1057 		pgoff_t pgoff;
1058 		long rc, id;
1059 		void *kaddr;
1060 
1061 		rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
1062 		if (rc)
1063 			return rc;
1064 
1065 		id = dax_read_lock();
1066 		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
1067 		if (rc < 0) {
1068 			dax_read_unlock(id);
1069 			return rc;
1070 		}
1071 		memset(kaddr + offset, 0, size);
1072 		dax_flush(dax_dev, kaddr + offset, size);
1073 		dax_read_unlock(id);
1074 	}
1075 	return 0;
1076 }
1077 EXPORT_SYMBOL_GPL(__dax_zero_page_range);
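
/*
 * Example of the two paths above with a 512-byte logical block size:
 * zeroing offset 0, size 4096 is block-aligned and becomes an 8-sector
 * blkdev_issue_zeroout(); zeroing offset 100, size 200 is not, so the page
 * is mapped with dax_direct_access(), the range is memset() to zero and
 * then flushed with dax_flush().
 */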
1078 
1079 static loff_t
1080 dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
1081 		struct iomap *iomap)
1082 {
1083 	struct block_device *bdev = iomap->bdev;
1084 	struct dax_device *dax_dev = iomap->dax_dev;
1085 	struct iov_iter *iter = data;
1086 	loff_t end = pos + length, done = 0;
1087 	ssize_t ret = 0;
1088 	size_t xfer;
1089 	int id;
1090 
1091 	if (iov_iter_rw(iter) == READ) {
1092 		end = min(end, i_size_read(inode));
1093 		if (pos >= end)
1094 			return 0;
1095 
1096 		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
1097 			return iov_iter_zero(min(length, end - pos), iter);
1098 	}
1099 
1100 	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
1101 		return -EIO;
1102 
1103 	/*
1104 	 * A write can allocate a block for an area which has a hole page mapped
1105 	 * into page tables. We have to tear down these mappings so that data
1106 	 * written by write(2) is visible in mmap.
1107 	 */
1108 	if (iomap->flags & IOMAP_F_NEW) {
1109 		invalidate_inode_pages2_range(inode->i_mapping,
1110 					      pos >> PAGE_SHIFT,
1111 					      (end - 1) >> PAGE_SHIFT);
1112 	}
1113 
1114 	id = dax_read_lock();
1115 	while (pos < end) {
1116 		unsigned offset = pos & (PAGE_SIZE - 1);
1117 		const size_t size = ALIGN(length + offset, PAGE_SIZE);
1118 		const sector_t sector = dax_iomap_sector(iomap, pos);
1119 		ssize_t map_len;
1120 		pgoff_t pgoff;
1121 		void *kaddr;
1122 
1123 		if (fatal_signal_pending(current)) {
1124 			ret = -EINTR;
1125 			break;
1126 		}
1127 
1128 		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
1129 		if (ret)
1130 			break;
1131 
1132 		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
1133 				&kaddr, NULL);
1134 		if (map_len < 0) {
1135 			ret = map_len;
1136 			break;
1137 		}
1138 
1139 		map_len = PFN_PHYS(map_len);
1140 		kaddr += offset;
1141 		map_len -= offset;
1142 		if (map_len > end - pos)
1143 			map_len = end - pos;
1144 
1145 		/*
1146 		 * The userspace address for the memory copy has already been
1147 		 * validated via access_ok() in either vfs_read() or
1148 		 * vfs_write(), depending on which operation we are doing.
1149 		 */
1150 		if (iov_iter_rw(iter) == WRITE)
1151 			xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
1152 					map_len, iter);
1153 		else
1154 			xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
1155 					map_len, iter);
1156 
1157 		pos += xfer;
1158 		length -= xfer;
1159 		done += xfer;
1160 
1161 		if (xfer == 0)
1162 			ret = -EFAULT;
1163 		if (xfer < map_len)
1164 			break;
1165 	}
1166 	dax_read_unlock(id);
1167 
1168 	return done ? done : ret;
1169 }
1170 
1171 /**
1172  * dax_iomap_rw - Perform I/O to a DAX file
1173  * @iocb:	The control block for this I/O
1174  * @iter:	The addresses to do I/O from or to
1175  * @ops:	iomap ops passed from the file system
1176  *
1177  * This function performs read and write operations to directly mapped
1178  * persistent memory.  The caller needs to take care of read/write exclusion
1179  * and evicting any page cache pages in the region under I/O.
1180  */
1181 ssize_t
1182 dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
1183 		const struct iomap_ops *ops)
1184 {
1185 	struct address_space *mapping = iocb->ki_filp->f_mapping;
1186 	struct inode *inode = mapping->host;
1187 	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
1188 	unsigned flags = 0;
1189 
1190 	if (iov_iter_rw(iter) == WRITE) {
1191 		lockdep_assert_held_exclusive(&inode->i_rwsem);
1192 		flags |= IOMAP_WRITE;
1193 	} else {
1194 		lockdep_assert_held(&inode->i_rwsem);
1195 	}
1196 
1197 	while (iov_iter_count(iter)) {
1198 		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
1199 				iter, dax_iomap_actor);
1200 		if (ret <= 0)
1201 			break;
1202 		pos += ret;
1203 		done += ret;
1204 	}
1205 
1206 	iocb->ki_pos += done;
1207 	return done ? done : ret;
1208 }
1209 EXPORT_SYMBOL_GPL(dax_iomap_rw);
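
/*
 * Read-side sketch of a filesystem ->read_iter built on this helper (ext4's
 * DAX read path looks roughly like this; the surrounding lock provides the
 * read/write exclusion mentioned above):
 *
 *	inode_lock_shared(inode);
 *	ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
 *	inode_unlock_shared(inode);
 */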
1210 
1211 static vm_fault_t dax_fault_return(int error)
1212 {
1213 	if (error == 0)
1214 		return VM_FAULT_NOPAGE;
1215 	return vmf_error(error);
1216 }
1217 
1218 /*
1219  * MAP_SYNC on a dax mapping guarantees dirty metadata is
1220  * flushed on write-faults (non-cow), but not read-faults.
1221  */
1222 static bool dax_fault_is_synchronous(unsigned long flags,
1223 		struct vm_area_struct *vma, struct iomap *iomap)
1224 {
1225 	return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
1226 		&& (iomap->flags & IOMAP_F_DIRTY);
1227 }
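
/*
 * For example, a write fault on a mapping created with MAP_SHARED | MAP_SYNC
 * (so VM_SYNC is set) whose block mapping the filesystem flagged
 * IOMAP_F_DIRTY is synchronous and must be completed via
 * dax_finish_sync_fault(); a read fault on the same mapping is not.
 */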
1228 
1229 static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
1230 			       int *iomap_errp, const struct iomap_ops *ops)
1231 {
1232 	struct vm_area_struct *vma = vmf->vma;
1233 	struct address_space *mapping = vma->vm_file->f_mapping;
1234 	XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
1235 	struct inode *inode = mapping->host;
1236 	unsigned long vaddr = vmf->address;
1237 	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
1238 	struct iomap iomap = { 0 };
1239 	unsigned flags = IOMAP_FAULT;
1240 	int error, major = 0;
1241 	bool write = vmf->flags & FAULT_FLAG_WRITE;
1242 	bool sync;
1243 	vm_fault_t ret = 0;
1244 	void *entry;
1245 	pfn_t pfn;
1246 
1247 	trace_dax_pte_fault(inode, vmf, ret);
1248 	/*
1249 	 * Check whether offset isn't beyond end of file now. Caller is supposed
1250 	 * to hold locks serializing us with truncate / punch hole so this is
1251 	 * a reliable test.
1252 	 */
1253 	if (pos >= i_size_read(inode)) {
1254 		ret = VM_FAULT_SIGBUS;
1255 		goto out;
1256 	}
1257 
1258 	if (write && !vmf->cow_page)
1259 		flags |= IOMAP_WRITE;
1260 
1261 	entry = grab_mapping_entry(&xas, mapping, 0);
1262 	if (xa_is_internal(entry)) {
1263 		ret = xa_to_internal(entry);
1264 		goto out;
1265 	}
1266 
1267 	/*
1268 	 * It is possible, particularly with mixed reads & writes to private
1269 	 * mappings, that we have raced with a PMD fault that overlaps with
1270 	 * the PTE we need to set up.  If so just return and the fault will be
1271 	 * retried.
1272 	 */
1273 	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
1274 		ret = VM_FAULT_NOPAGE;
1275 		goto unlock_entry;
1276 	}
1277 
1278 	/*
1279 	 * Note that we don't bother to use iomap_apply here: DAX requires
1280 	 * the filesystem block size to be equal to the page size, which means
1281 	 * that we never have to deal with more than a single extent here.
1282 	 */
1283 	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
1284 	if (iomap_errp)
1285 		*iomap_errp = error;
1286 	if (error) {
1287 		ret = dax_fault_return(error);
1288 		goto unlock_entry;
1289 	}
1290 	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
1291 		error = -EIO;	/* fs corruption? */
1292 		goto error_finish_iomap;
1293 	}
1294 
1295 	if (vmf->cow_page) {
1296 		sector_t sector = dax_iomap_sector(&iomap, pos);
1297 
1298 		switch (iomap.type) {
1299 		case IOMAP_HOLE:
1300 		case IOMAP_UNWRITTEN:
1301 			clear_user_highpage(vmf->cow_page, vaddr);
1302 			break;
1303 		case IOMAP_MAPPED:
1304 			error = copy_user_dax(iomap.bdev, iomap.dax_dev,
1305 					sector, PAGE_SIZE, vmf->cow_page, vaddr);
1306 			break;
1307 		default:
1308 			WARN_ON_ONCE(1);
1309 			error = -EIO;
1310 			break;
1311 		}
1312 
1313 		if (error)
1314 			goto error_finish_iomap;
1315 
1316 		__SetPageUptodate(vmf->cow_page);
1317 		ret = finish_fault(vmf);
1318 		if (!ret)
1319 			ret = VM_FAULT_DONE_COW;
1320 		goto finish_iomap;
1321 	}
1322 
1323 	sync = dax_fault_is_synchronous(flags, vma, &iomap);
1324 
1325 	switch (iomap.type) {
1326 	case IOMAP_MAPPED:
1327 		if (iomap.flags & IOMAP_F_NEW) {
1328 			count_vm_event(PGMAJFAULT);
1329 			count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
1330 			major = VM_FAULT_MAJOR;
1331 		}
1332 		error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
1333 		if (error < 0)
1334 			goto error_finish_iomap;
1335 
1336 		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
1337 						 0, write && !sync);
1338 
1339 		/*
1340 		 * If we are doing a synchronous page fault and the inode needs fsync,
1341 		 * we can insert the PTE into the page tables only after that happens.
1342 		 * Skip insertion for now and return the pfn so that caller can
1343 		 * insert it after fsync is done.
1344 		 */
1345 		if (sync) {
1346 			if (WARN_ON_ONCE(!pfnp)) {
1347 				error = -EIO;
1348 				goto error_finish_iomap;
1349 			}
1350 			*pfnp = pfn;
1351 			ret = VM_FAULT_NEEDDSYNC | major;
1352 			goto finish_iomap;
1353 		}
1354 		trace_dax_insert_mapping(inode, vmf, entry);
1355 		if (write)
1356 			ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
1357 		else
1358 			ret = vmf_insert_mixed(vma, vaddr, pfn);
1359 
1360 		goto finish_iomap;
1361 	case IOMAP_UNWRITTEN:
1362 	case IOMAP_HOLE:
1363 		if (!write) {
1364 			ret = dax_load_hole(&xas, mapping, &entry, vmf);
1365 			goto finish_iomap;
1366 		}
1367 		/*FALLTHRU*/
1368 	default:
1369 		WARN_ON_ONCE(1);
1370 		error = -EIO;
1371 		break;
1372 	}
1373 
1374  error_finish_iomap:
1375 	ret = dax_fault_return(error);
1376  finish_iomap:
1377 	if (ops->iomap_end) {
1378 		int copied = PAGE_SIZE;
1379 
1380 		if (ret & VM_FAULT_ERROR)
1381 			copied = 0;
1382 		/*
1383 		 * The fault is done by now and there's no way back (other
1384 		 * thread may be already happily using PTE we have installed).
1385 		 * Just ignore error from ->iomap_end since we cannot do much
1386 		 * with it.
1387 		 */
1388 		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
1389 	}
1390  unlock_entry:
1391 	dax_unlock_entry(&xas, entry);
1392  out:
1393 	trace_dax_pte_fault_done(inode, vmf, ret);
1394 	return ret | major;
1395 }
1396 
1397 #ifdef CONFIG_FS_DAX_PMD
1398 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
1399 		struct iomap *iomap, void **entry)
1400 {
1401 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1402 	unsigned long pmd_addr = vmf->address & PMD_MASK;
1403 	struct vm_area_struct *vma = vmf->vma;
1404 	struct inode *inode = mapping->host;
1405 	pgtable_t pgtable = NULL;
1406 	struct page *zero_page;
1407 	spinlock_t *ptl;
1408 	pmd_t pmd_entry;
1409 	pfn_t pfn;
1410 
1411 	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
1412 
1413 	if (unlikely(!zero_page))
1414 		goto fallback;
1415 
1416 	pfn = page_to_pfn_t(zero_page);
1417 	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
1418 			DAX_PMD | DAX_ZERO_PAGE, false);
1419 
1420 	if (arch_needs_pgtable_deposit()) {
1421 		pgtable = pte_alloc_one(vma->vm_mm);
1422 		if (!pgtable)
1423 			return VM_FAULT_OOM;
1424 	}
1425 
1426 	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1427 	if (!pmd_none(*(vmf->pmd))) {
1428 		spin_unlock(ptl);
1429 		goto fallback;
1430 	}
1431 
1432 	if (pgtable) {
1433 		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
1434 		mm_inc_nr_ptes(vma->vm_mm);
1435 	}
1436 	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
1437 	pmd_entry = pmd_mkhuge(pmd_entry);
1438 	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
1439 	spin_unlock(ptl);
1440 	trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
1441 	return VM_FAULT_NOPAGE;
1442 
1443 fallback:
1444 	if (pgtable)
1445 		pte_free(vma->vm_mm, pgtable);
1446 	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
1447 	return VM_FAULT_FALLBACK;
1448 }
1449 
1450 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1451 			       const struct iomap_ops *ops)
1452 {
1453 	struct vm_area_struct *vma = vmf->vma;
1454 	struct address_space *mapping = vma->vm_file->f_mapping;
1455 	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
1456 	unsigned long pmd_addr = vmf->address & PMD_MASK;
1457 	bool write = vmf->flags & FAULT_FLAG_WRITE;
1458 	bool sync;
1459 	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
1460 	struct inode *inode = mapping->host;
1461 	vm_fault_t result = VM_FAULT_FALLBACK;
1462 	struct iomap iomap = { 0 };
1463 	pgoff_t max_pgoff;
1464 	void *entry;
1465 	loff_t pos;
1466 	int error;
1467 	pfn_t pfn;
1468 
1469 	/*
1470 	 * Check whether offset isn't beyond end of file now. Caller is
1471 	 * supposed to hold locks serializing us with truncate / punch hole so
1472 	 * this is a reliable test.
1473 	 */
1474 	max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1475 
1476 	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
1477 
1478 	/*
1479 	 * Make sure that the faulting address's PMD offset (color) matches
1480 	 * the PMD offset from the start of the file.  This is necessary so
1481 	 * that a PMD range in the page table overlaps exactly with a PMD
1482 	 * range in the page cache.
1483 	 */
1484 	if ((vmf->pgoff & PG_PMD_COLOUR) !=
1485 	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
1486 		goto fallback;
1487 
1488 	/* Fall back to PTEs if we're going to COW */
1489 	if (write && !(vma->vm_flags & VM_SHARED))
1490 		goto fallback;
1491 
1492 	/* If the PMD would extend outside the VMA */
1493 	if (pmd_addr < vma->vm_start)
1494 		goto fallback;
1495 	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
1496 		goto fallback;
1497 
1498 	if (xas.xa_index >= max_pgoff) {
1499 		result = VM_FAULT_SIGBUS;
1500 		goto out;
1501 	}
1502 
1503 	/* If the PMD would extend beyond the file size */
1504 	if ((xas.xa_index | PG_PMD_COLOUR) >= max_pgoff)
1505 		goto fallback;
1506 
1507 	/*
1508 	 * grab_mapping_entry() will make sure we get an empty PMD entry,
1509 	 * a zero PMD entry or a DAX PMD.  If it can't (because a PTE
1510 	 * entry is already in the array, for instance), it will return
1511 	 * VM_FAULT_FALLBACK.
1512 	 */
1513 	entry = grab_mapping_entry(&xas, mapping, DAX_PMD);
1514 	if (xa_is_internal(entry)) {
1515 		result = xa_to_internal(entry);
1516 		goto fallback;
1517 	}
1518 
1519 	/*
1520 	 * It is possible, particularly with mixed reads & writes to private
1521 	 * mappings, that we have raced with a PTE fault that overlaps with
1522 	 * the PMD we need to set up.  If so just return and the fault will be
1523 	 * retried.
1524 	 */
1525 	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
1526 			!pmd_devmap(*vmf->pmd)) {
1527 		result = 0;
1528 		goto unlock_entry;
1529 	}
1530 
1531 	/*
1532 	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
1533 	 * setting up a mapping, so really we're using iomap_begin() as a way
1534 	 * to look up our filesystem block.
1535 	 */
1536 	pos = (loff_t)xas.xa_index << PAGE_SHIFT;
1537 	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
1538 	if (error)
1539 		goto unlock_entry;
1540 
1541 	if (iomap.offset + iomap.length < pos + PMD_SIZE)
1542 		goto finish_iomap;
1543 
1544 	sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);
1545 
1546 	switch (iomap.type) {
1547 	case IOMAP_MAPPED:
1548 		error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
1549 		if (error < 0)
1550 			goto finish_iomap;
1551 
1552 		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
1553 						DAX_PMD, write && !sync);
1554 
1555 		/*
1556 		 * If we are doing a synchronous page fault and the inode needs fsync,
1557 		 * we can insert the PMD into the page tables only after that happens.
1558 		 * Skip insertion for now and return the pfn so that caller can
1559 		 * insert it after fsync is done.
1560 		 */
1561 		if (sync) {
1562 			if (WARN_ON_ONCE(!pfnp))
1563 				goto finish_iomap;
1564 			*pfnp = pfn;
1565 			result = VM_FAULT_NEEDDSYNC;
1566 			goto finish_iomap;
1567 		}
1568 
1569 		trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
1570 		result = vmf_insert_pfn_pmd(vmf, pfn, write);
1571 		break;
1572 	case IOMAP_UNWRITTEN:
1573 	case IOMAP_HOLE:
1574 		if (WARN_ON_ONCE(write))
1575 			break;
1576 		result = dax_pmd_load_hole(&xas, vmf, &iomap, &entry);
1577 		break;
1578 	default:
1579 		WARN_ON_ONCE(1);
1580 		break;
1581 	}
1582 
1583  finish_iomap:
1584 	if (ops->iomap_end) {
1585 		int copied = PMD_SIZE;
1586 
1587 		if (result == VM_FAULT_FALLBACK)
1588 			copied = 0;
1589 		/*
1590 		 * The fault is done by now and there's no way back (other
1591 		 * thread may be already happily using PMD we have installed).
1592 		 * Just ignore error from ->iomap_end since we cannot do much
1593 		 * with it.
1594 		 */
1595 		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
1596 				&iomap);
1597 	}
1598  unlock_entry:
1599 	dax_unlock_entry(&xas, entry);
1600  fallback:
1601 	if (result == VM_FAULT_FALLBACK) {
1602 		split_huge_pmd(vma, vmf->pmd, vmf->address);
1603 		count_vm_event(THP_FAULT_FALLBACK);
1604 	}
1605 out:
1606 	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
1607 	return result;
1608 }
1609 #else
1610 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1611 			       const struct iomap_ops *ops)
1612 {
1613 	return VM_FAULT_FALLBACK;
1614 }
1615 #endif /* CONFIG_FS_DAX_PMD */
1616 
1617 /**
1618  * dax_iomap_fault - handle a page fault on a DAX file
1619  * @vmf: The description of the fault
1620  * @pe_size: Size of the page to fault in
1621  * @pfnp: PFN to insert for synchronous faults if fsync is required
1622  * @iomap_errp: Storage for detailed error code in case of error
1623  * @ops: Iomap ops passed from the file system
1624  *
1625  * When a page fault occurs, filesystems may call this helper in
1626  * their fault handler for DAX files. dax_iomap_fault() assumes the caller
1627  * has done all the necessary locking for page fault to proceed
1628  * has done all the necessary locking for the page fault to proceed
1629  */
1630 vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
1631 		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
1632 {
1633 	switch (pe_size) {
1634 	case PE_SIZE_PTE:
1635 		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
1636 	case PE_SIZE_PMD:
1637 		return dax_iomap_pmd_fault(vmf, pfnp, ops);
1638 	default:
1639 		return VM_FAULT_FALLBACK;
1640 	}
1641 }
1642 EXPORT_SYMBOL_GPL(dax_iomap_fault);
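
/*
 * Minimal sketch of a filesystem fault handler built on this helper
 * (example_iomap_ops is a stand-in for the filesystem's own iomap_ops; real
 * handlers also take their mapping locks around the call and deal with
 * VM_FAULT_NEEDDSYNC, see dax_finish_sync_fault() below):
 *
 *	static vm_fault_t example_huge_fault(struct vm_fault *vmf,
 *			enum page_entry_size pe_size)
 *	{
 *		pfn_t pfn;
 *
 *		return dax_iomap_fault(vmf, pe_size, &pfn, NULL,
 *				&example_iomap_ops);
 *	}
 */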
1643 
1644 /*
1645  * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
1646  * @vmf: The description of the fault
1647  * @pfn: PFN to insert
1648  * @order: Order of entry to insert.
1649  *
1650  * This function inserts a writeable PTE or PMD entry into the page tables
1651  * for an mmapped DAX file.  It also marks the page cache entry as dirty.
1652  */
1653 static vm_fault_t
1654 dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
1655 {
1656 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1657 	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
1658 	void *entry;
1659 	vm_fault_t ret;
1660 
1661 	xas_lock_irq(&xas);
1662 	entry = get_unlocked_entry(&xas);
1663 	/* Did we race with someone splitting entry or so? */
1664 	if (!entry ||
1665 	    (order == 0 && !dax_is_pte_entry(entry)) ||
1666 	    (order == PMD_ORDER && !dax_is_pmd_entry(entry))) {
1667 		put_unlocked_entry(&xas, entry);
1668 		xas_unlock_irq(&xas);
1669 		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
1670 						      VM_FAULT_NOPAGE);
1671 		return VM_FAULT_NOPAGE;
1672 	}
1673 	xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
1674 	dax_lock_entry(&xas, entry);
1675 	xas_unlock_irq(&xas);
1676 	if (order == 0)
1677 		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
1678 #ifdef CONFIG_FS_DAX_PMD
1679 	else if (order == PMD_ORDER)
1680 		ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
1681 #endif
1682 	else
1683 		ret = VM_FAULT_FALLBACK;
1684 	dax_unlock_entry(&xas, entry);
1685 	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
1686 	return ret;
1687 }
1688 
1689 /**
1690  * dax_finish_sync_fault - finish synchronous page fault
1691  * @vmf: The description of the fault
1692  * @pe_size: Size of entry to be inserted
1693  * @pfn: PFN to insert
1694  *
1695  * This function ensures that the file range touched by the page fault is
1696  * stored persistently on the media and then inserts the appropriate page
1697  * table entry.
1698  */
1699 vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
1700 		enum page_entry_size pe_size, pfn_t pfn)
1701 {
1702 	int err;
1703 	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
1704 	unsigned int order = pe_order(pe_size);
1705 	size_t len = PAGE_SIZE << order;
1706 
1707 	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
1708 	if (err)
1709 		return VM_FAULT_SIGBUS;
1710 	return dax_insert_pfn_mkwrite(vmf, pfn, order);
1711 }
1712 EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
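
/*
 * Sketch of the synchronous fault flow seen from a filesystem fault handler
 * (example_iomap_ops is illustrative; the fsync and final insertion are done
 * here by dax_finish_sync_fault()):
 *
 *	ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &example_iomap_ops);
 *	if (ret & VM_FAULT_NEEDDSYNC)
 *		ret = dax_finish_sync_fault(vmf, pe_size, pfn);
 */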
1713