/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

static int dax_is_pmd_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_PMD;
}

static int dax_is_pte_entry(void *entry)
{
	return !((unsigned long)entry & RADIX_DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_HZP;
}

static int dax_is_empty_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_EMPTY;
}

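/*
 * A sketch of the exceptional entry encoding that the helpers above
 * decode (the authoritative RADIX_DAX_* definitions live in
 * include/linux/dax.h):
 *
 *	entry = (sector << shift) | EMPTY | HZP | PMD | LOCK | EXCEPTIONAL
 *
 * i.e. the low bits carry the radix tree exceptional-entry marker, the
 * per-entry lock bit and the PMD/HZP/EMPTY type flags tested above,
 * while dax_radix_sector() recovers the block sector from the high bits.
 */
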
/*
 * DAX radix tree locking
 */
struct exceptional_entry_key {
	struct address_space *mapping;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_t wait;
	struct exceptional_entry_key key;
};

static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
		pgoff_t index, void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1);

	key->mapping = mapping;
	key->entry_start = index;

	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}

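/*
 * Worked example: with 4k pages and 2MiB PMDs, PMD_SHIFT - PAGE_SHIFT
 * is 9, so for a PMD entry the indices 0x200..0x3ff are all aligned
 * down to entry_start == 0x200 and thus share a single wait queue.
 */
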
static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
				       int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->mapping != ewait->key.mapping ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * Check whether the given slot is locked.  The function must be called
 * with mapping->tree_lock held.
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
	return entry & RADIX_DAX_ENTRY_LOCK;
}

/*
 * Mark the given slot as locked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry |= RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Mark the given slot as unlocked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}

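/*
 * Putting these helpers together, a typical locking sequence (see
 * dax_writeback_one() below for a real instance) looks like:
 *
 *	spin_lock_irq(&mapping->tree_lock);
 *	entry = get_unlocked_mapping_entry(mapping, index, &slot);
 *	...inspect entry, decide to take it...
 *	entry = lock_slot(mapping, slot);
 *	spin_unlock_irq(&mapping->tree_lock);
 *	...work on the entry; the lock bit keeps it stable...
 *	put_locked_mapping_entry(mapping, index, entry);
 */
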
/*
 * Look up an entry in the radix tree and, if it is an exceptional entry,
 * wait for it to become unlocked before returning it.  The caller must
 * call put_unlocked_mapping_entry() if it decides not to lock the entry,
 * or put_locked_mapping_entry() once it has locked the entry and wants
 * to unlock it again.
 *
 * The function must be called with mapping->tree_lock held.
 */
static void *get_unlocked_mapping_entry(struct address_space *mapping,
					pgoff_t index, void ***slotp)
{
	void *entry, **slot;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
					  &slot);
		if (!entry || !radix_tree_exceptional_entry(entry) ||
		    !slot_locked(mapping, slot)) {
			if (slotp)
				*slotp = slot;
			return entry;
		}

		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mapping->tree_lock);
		schedule();
		finish_wait(wq, &ewait.wait);
		spin_lock_irq(&mapping->tree_lock);
	}
}

static void dax_unlock_mapping_entry(struct address_space *mapping,
				     pgoff_t index)
{
	void *entry, **slot;

	spin_lock_irq(&mapping->tree_lock);
	entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
			 !slot_locked(mapping, slot))) {
		spin_unlock_irq(&mapping->tree_lock);
		return;
	}
	unlock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

static void put_locked_mapping_entry(struct address_space *mapping,
				     pgoff_t index, void *entry)
{
	if (!radix_tree_exceptional_entry(entry)) {
		unlock_page(entry);
		put_page(entry);
	} else {
		dax_unlock_mapping_entry(mapping, index);
	}
}

/*
 * Called when we are done with a radix tree entry we looked up via
 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 */
static void put_unlocked_mapping_entry(struct address_space *mapping,
				       pgoff_t index, void *entry)
{
	if (!radix_tree_exceptional_entry(entry))
		return;

	/* We have to wake up the next waiter for the radix tree entry lock */
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

/*
 * Find the radix tree entry at the given index.  If it points to a page,
 * return with the page locked.  If it points to an exceptional entry,
 * return with the radix tree entry locked.  If the radix tree doesn't
 * contain the given index, create an empty exceptional entry for the
 * index and return with it locked.
 *
 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return an error.  This error will
 * happen if there are any 4k entries (either zero pages or DAX entries)
 * within the 2MiB range that we are requesting.
 *
 * We always favor 4k entries over 2MiB entries.  There isn't a flow where we
 * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
 * insertion will fail if it finds any 4k entries already in the tree, and a
 * 4k insertion will cause an existing 2MiB entry to be unmapped and
 * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
 * well as 2MiB empty entries.
 *
 * The exception to this downgrade path is for 2MiB DAX PMD entries that have
 * real storage backing them.  We will leave these real 2MiB DAX entries in
 * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags.  For
 * persistent memory the benefit is doubtful.  We can add that later if we
 * can show it helps.
 */
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
		unsigned long size_flag)
{
	bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
	void *entry, **slot;

restart:
	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);

	if (entry) {
		if (size_flag & RADIX_DAX_PMD) {
			if (!radix_tree_exceptional_entry(entry) ||
			    dax_is_pte_entry(entry)) {
				put_unlocked_mapping_entry(mapping, index,
						entry);
				entry = ERR_PTR(-EEXIST);
				goto out_unlock;
			}
		} else { /* trying to grab a PTE entry */
			if (radix_tree_exceptional_entry(entry) &&
			    dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	/* No entry for given index? Make sure radix tree is big enough. */
	if (!entry || pmd_downgrade) {
		int err;

		if (pmd_downgrade) {
			/*
			 * Make sure 'entry' remains valid while we drop
			 * mapping->tree_lock.
			 */
			entry = lock_slot(mapping, slot);
		}

		spin_unlock_irq(&mapping->tree_lock);
		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (pmd_downgrade && dax_is_zero_entry(entry))
			unmap_mapping_range(mapping,
				(index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);

		err = radix_tree_preload(
				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
		if (err) {
			if (pmd_downgrade)
				put_locked_mapping_entry(mapping, index, entry);
			return ERR_PTR(err);
		}
		spin_lock_irq(&mapping->tree_lock);

		if (!entry) {
			/*
			 * We needed to drop the page_tree lock while calling
			 * radix_tree_preload() and we didn't have an entry to
			 * lock.  See if another thread inserted an entry at
			 * our index during this time.
			 */
			entry = __radix_tree_lookup(&mapping->page_tree, index,
					NULL, &slot);
			if (entry) {
				radix_tree_preload_end();
				spin_unlock_irq(&mapping->tree_lock);
				goto restart;
			}
		}

		if (pmd_downgrade) {
			radix_tree_delete(&mapping->page_tree, index);
			mapping->nrexceptional--;
			dax_wake_mapping_entry_waiter(mapping, index, entry,
					true);
		}

		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);

		err = __radix_tree_insert(&mapping->page_tree, index,
				dax_radix_order(entry), entry);
		radix_tree_preload_end();
		if (err) {
			spin_unlock_irq(&mapping->tree_lock);
			/*
			 * Our insertion of a DAX entry failed, most likely
			 * because we were inserting a PMD entry and it
			 * collided with a PTE sized entry at a different
			 * index in the PMD range.  We haven't inserted
			 * anything into the radix tree and have no waiters to
			 * wake.
			 */
			return ERR_PTR(err);
		}
		/* Good, we have inserted empty locked entry into the tree. */
		mapping->nrexceptional++;
		spin_unlock_irq(&mapping->tree_lock);
		return entry;
	}
	/* Normal page in radix tree? */
	if (!radix_tree_exceptional_entry(entry)) {
		struct page *page = entry;

		get_page(page);
		spin_unlock_irq(&mapping->tree_lock);
		lock_page(page);
		/* Page got truncated? Retry... */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			put_page(page);
			goto restart;
		}
		return page;
	}
	entry = lock_slot(mapping, slot);
 out_unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return entry;
}

/*
 * We do not necessarily hold the mapping->tree_lock when we call this
 * function so it is possible that 'entry' is no longer a valid item in the
 * radix tree.  This is okay because all we really need to do is to find the
 * correct waitqueue where tasks might be waiting for that old 'entry' and
 * wake them.
 */
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
		pgoff_t index, void *entry, bool wake_all)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(mapping, index, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under mapping->tree_lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}

static int __dax_invalidate_mapping_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	int ret = 0;
	void *entry;
	struct radix_tree_root *page_tree = &mapping->page_tree;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	if (!entry || !radix_tree_exceptional_entry(entry))
		goto out;
	if (!trunc &&
	    (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
	     radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
		goto out;
	radix_tree_delete(page_tree, index);
	mapping->nrexceptional--;
	ret = 1;
out:
	put_unlocked_mapping_entry(mapping, index, entry);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}

/*
 * Delete the exceptional DAX entry at @index from @mapping.  Wait for the
 * radix tree entry to get unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_mapping_entry(mapping, index, true);

	/*
	 * This gets called from the truncate / punch_hole path.  As such, the
	 * caller must hold locks protecting against concurrent modifications
	 * of the radix tree (usually fs-private i_mmap_sem for writing).
	 * Since the caller has seen an exceptional entry for this index, we
	 * expect to find it at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate an exceptional DAX entry if it is easily possible.  This
 * handles DAX entries for invalidate_inode_pages(), so we evict the entry
 * only if we can do so without blocking.
 */
int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = 0;
	void *entry, **slot;
	struct radix_tree_root *page_tree = &mapping->page_tree;

	spin_lock_irq(&mapping->tree_lock);
	entry = __radix_tree_lookup(page_tree, index, NULL, &slot);
	if (!entry || !radix_tree_exceptional_entry(entry) ||
	    slot_locked(mapping, slot))
		goto out;
	if (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
	    radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto out;
	radix_tree_delete(page_tree, index);
	mapping->nrexceptional--;
	ret = 1;
out:
	spin_unlock_irq(&mapping->tree_lock);
	if (ret)
		dax_wake_mapping_entry_waiter(mapping, index, entry, true);
	return ret;
}

/*
 * Invalidate an exceptional DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_mapping_entry(mapping, index, false);
}

/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, void **entry,
			 struct vm_fault *vmf)
{
	struct page *page;
	int ret;

	/* Hole page already exists? Return it...  */
	if (!radix_tree_exceptional_entry(*entry)) {
		page = *entry;
		goto out;
	}

	/* This will replace locked radix tree entry with a hole page */
	page = find_or_create_page(mapping, vmf->pgoff,
				   vmf->gfp_mask | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;
 out:
	vmf->page = page;
	ret = finish_fault(vmf);
	vmf->page = NULL;
	*entry = page;
	if (!ret) {
		/* Grab reference for PTE that is now referencing the page */
		get_page(page);
		return VM_FAULT_NOPAGE;
	}
	return ret;
}

static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
		sector_t sector, size_t size, struct page *to,
		unsigned long vaddr)
{
	void *vto, *kaddr;
	pgoff_t pgoff;
	pfn_t pfn;
	long rc;
	int id;

	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_mapping_entry(struct address_space *mapping,
				      struct vm_fault *vmf,
				      void *entry, sector_t sector,
				      unsigned long flags)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	int error = 0;
	bool hole_fill = false;
	void *new_entry;
	pgoff_t index = vmf->pgoff;

	if (vmf->flags & FAULT_FLAG_WRITE)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	/* Replacing hole page with block mapping? */
	if (!radix_tree_exceptional_entry(entry)) {
		hole_fill = true;
		/*
		 * Unmap the page now before we remove it from page cache below.
		 * The page is locked so it cannot be faulted in again.
		 */
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
				    PAGE_SIZE, 0);
		error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM);
		if (error)
			return ERR_PTR(error);
	} else if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_HZP)) {
		/* replacing huge zero page with PMD block mapping */
		unmap_mapping_range(mapping,
			(vmf->pgoff << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
	}

	spin_lock_irq(&mapping->tree_lock);
	new_entry = dax_radix_locked_entry(sector, flags);

	if (hole_fill) {
		__delete_from_page_cache(entry, NULL);
		/* Drop pagecache reference */
		put_page(entry);
		error = __radix_tree_insert(page_tree, index,
				dax_radix_order(new_entry), new_entry);
		if (error) {
			new_entry = ERR_PTR(error);
			goto unlock;
		}
		mapping->nrexceptional++;
	} else if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		/*
		 * Only swap our new entry into the radix tree if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the tree, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		struct radix_tree_node *node;
		void **slot;
		void *ret;

		ret = __radix_tree_lookup(page_tree, index, &node, &slot);
		WARN_ON_ONCE(ret != entry);
		__radix_tree_replace(page_tree, node, slot,
				     new_entry, NULL, NULL);
	}
	if (vmf->flags & FAULT_FLAG_WRITE)
		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
 unlock:
	spin_unlock_irq(&mapping->tree_lock);
	if (hole_fill) {
		radix_tree_preload_end();
		/*
		 * We don't need the hole page anymore, it has been replaced
		 * with a locked radix tree entry now.
		 */
		if (mapping->a_ops->freepage)
			mapping->a_ops->freepage(entry);
		unlock_page(entry);
		put_page(entry);
	}
	return new_entry;
}

static inline unsigned long
pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}

/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_mapping_entry_mkclean(struct address_space *mapping,
				      pgoff_t index, unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;
	bool changed;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		unsigned long address;

		cond_resched();

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);
		changed = false;
		if (follow_pte_pmd(vma->vm_mm, address, &ptep, &pmdp, &ptl))
			continue;

		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_huge_clear_flush(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
			changed = true;
unlock_pmd:
			spin_unlock(ptl);
#endif
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
			changed = true;
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		if (changed)
			mmu_notifier_invalidate_page(vma->vm_mm, address);
	}
	i_mmap_unlock_read(mapping);
}

static int dax_writeback_one(struct block_device *bdev,
		struct dax_device *dax_dev, struct address_space *mapping,
		pgoff_t index, void *entry)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	void *entry2, **slot, *kaddr;
	long ret = 0, id;
	sector_t sector;
	pgoff_t pgoff;
	size_t size;
	pfn_t pfn;

	/*
	 * A page got tagged dirty in the DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!radix_tree_exceptional_entry(entry)))
		return -EIO;

	spin_lock_irq(&mapping->tree_lock);
	entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
	/* Entry got punched out / reallocated? */
	if (!entry2 || !radix_tree_exceptional_entry(entry2))
		goto put_unlocked;
	/*
	 * Entry got reallocated elsewhere? No need to write it back.  We
	 * have to compare sectors as we must not bail out due to a difference
	 * in the lock bit or the entry type.
	 */
	if (dax_radix_sector(entry2) != dax_radix_sector(entry))
		goto put_unlocked;
	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
				dax_is_zero_entry(entry))) {
		ret = -EIO;
		goto put_unlocked;
	}

	/* Another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto put_unlocked;
	/* Lock the entry to serialize with page faults */
	entry = lock_slot(mapping, slot);
	/*
	 * We can clear the tag now but we have to be careful so that
	 * concurrent dax_writeback_one() calls for the same index cannot
	 * finish before we actually flush the caches.  This is achieved as
	 * the calls will look at the entry only under tree_lock and once
	 * they do that they will see the entry locked and wait for it to
	 * unlock.
	 */
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
	spin_unlock_irq(&mapping->tree_lock);

	/*
	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we are given will be aligned to
	 * the start index of the PMD, as will the sector we pull from
	 * 'entry'.  This allows us to flush for PMD_SIZE and not have to
	 * worry about partial PMD writebacks.
	 */
	sector = dax_radix_sector(entry);
	size = PAGE_SIZE << dax_radix_order(entry);

	id = dax_read_lock();
	ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (ret)
		goto dax_unlock;

	/*
	 * dax_direct_access() may sleep, so we cannot hold tree_lock over
	 * its invocation.
	 */
	ret = dax_direct_access(dax_dev, pgoff, size / PAGE_SIZE, &kaddr, &pfn);
	if (ret < 0)
		goto dax_unlock;

	if (WARN_ON_ONCE(ret < size / PAGE_SIZE)) {
		ret = -EIO;
		goto dax_unlock;
	}

	dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(pfn));
	wb_cache_pmem(kaddr, size);
	/*
	 * After we have flushed the cache, we can clear the dirty tag.  There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for the mapping
	 * entry lock.
	 */
	spin_lock_irq(&mapping->tree_lock);
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
	spin_unlock_irq(&mapping->tree_lock);
 dax_unlock:
	dax_read_unlock(id);
	put_locked_mapping_entry(mapping, index, entry);
	return ret;

 put_unlocked:
	put_unlocked_mapping_entry(mapping, index, entry2);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end].  This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	pgoff_t start_index, end_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct dax_device *dax_dev;
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev)
		return -EIO;

	start_index = wbc->range_start >> PAGE_SHIFT;
	end_index = wbc->range_end >> PAGE_SHIFT;

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec, 0);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(bdev, dax_dev, mapping,
					indices[i], pvec.pages[i]);
			if (ret < 0) {
				put_dax(dax_dev);
				return ret;
			}
		}
	}
	put_dax(dax_dev);
	return 0;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

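/*
 * Illustrative sketch, not part of this file (the "myfs" names are
 * hypothetical): a filesystem typically calls the above from its
 * ->writepages method for DAX inodes:
 *
 *	static int myfs_dax_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		return dax_writeback_mapping_range(mapping,
 *				mapping->host->i_sb->s_bdev, wbc);
 *	}
 */
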
static int dax_insert_mapping(struct address_space *mapping,
		struct block_device *bdev, struct dax_device *dax_dev,
		sector_t sector, size_t size, void **entryp,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long vaddr = vmf->address;
	void *entry = *entryp;
	void *ret, *kaddr;
	pgoff_t pgoff;
	int id, rc;
	pfn_t pfn;

	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	dax_read_unlock(id);

	ret = dax_insert_mapping_entry(mapping, vmf, entry, sector, 0);
	if (IS_ERR(ret))
		return PTR_ERR(ret);
	*entryp = ret;

	return vm_insert_mixed(vma, vaddr, pfn);
}

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	void *entry, **slot;
	pgoff_t index = vmf->pgoff;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);
	if (!entry || !radix_tree_exceptional_entry(entry)) {
		if (entry)
			put_unlocked_mapping_entry(mapping, index, entry);
		spin_unlock_irq(&mapping->tree_lock);
		return VM_FAULT_NOPAGE;
	}
	radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
	entry = lock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	/*
	 * If we race with somebody updating the PTE and finish_mkwrite_fault()
	 * fails, we don't care.  We need to return VM_FAULT_NOPAGE and retry
	 * the fault in either case.
	 */
	finish_mkwrite_fault(vmf);
	put_locked_mapping_entry(mapping, index, entry);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);

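/*
 * Illustrative sketch (hypothetical "myfs" names): a filesystem wires
 * dax_pfn_mkwrite() into its vm_operations_struct so that the radix
 * tree entry is dirtied on the first write to an already-mapped page:
 *
 *	static const struct vm_operations_struct myfs_dax_vm_ops = {
 *		.fault		= myfs_dax_fault,
 *		.pfn_mkwrite	= dax_pfn_mkwrite,
 *	};
 */
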
static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}

int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int size)
{
	if (dax_range_is_aligned(bdev, offset, size)) {
		sector_t start_sector = sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				size >> 9, GFP_NOFS, 0);
	} else {
		pgoff_t pgoff;
		long rc, id;
		void *kaddr;
		pfn_t pfn;

		rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
		if (rc)
			return rc;

		id = dax_read_lock();
		rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr,
				&pfn);
		if (rc < 0) {
			dax_read_unlock(id);
			return rc;
		}
		clear_pmem(kaddr + offset, size);
		dax_read_unlock(id);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);

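/*
 * Note: filesystems normally reach __dax_zero_page_range() through the
 * iomap zeroing helpers (e.g. iomap_zero_range()) rather than calling
 * it directly; the sector-aligned case above is punted to the block
 * layer while the unaligned case must zero through dax_direct_access().
 */
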
static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
}

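/*
 * Worked example: iomap->blkno is in 512-byte units, so with
 * iomap->offset == 0 and pos == 12288 (the fourth 4k page of the
 * extent) this returns iomap->blkno + 24, i.e. 8 sectors per page.
 */
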
static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct block_device *bdev = iomap->bdev;
	struct dax_device *dax_dev = iomap->dax_dev;
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;
	int id;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/*
	 * Write can allocate a block for an area which has a hole page mapped
	 * into page tables.  We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
	 */
	if ((iomap->flags & IOMAP_F_NEW) && inode->i_mapping->nrpages) {
		invalidate_inode_pages2_range(inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	id = dax_read_lock();
	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		const size_t size = ALIGN(length + offset, PAGE_SIZE);
		const sector_t sector = dax_iomap_sector(iomap, pos);
		ssize_t map_len;
		pgoff_t pgoff;
		void *kaddr;
		pfn_t pfn;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
		if (ret)
			break;

		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
				&kaddr, &pfn);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		map_len = PFN_PHYS(map_len);
		kaddr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		if (iov_iter_rw(iter) == WRITE)
			map_len = copy_from_iter_pmem(kaddr, map_len, iter);
		else
			map_len = copy_to_iter(kaddr, map_len, iter);
		if (map_len <= 0) {
			ret = map_len ? map_len : -EFAULT;
			break;
		}

		pos += map_len;
		length -= map_len;
		done += map_len;
	}
	dax_read_unlock(id);

	return done ? done : ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:	The control block for this I/O
 * @iter:	The addresses to do I/O from or to
 * @ops:	iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_exclusive(&inode->i_rwsem);
		flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&inode->i_rwsem);
	}

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);

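/*
 * Illustrative sketch (hypothetical "myfs" names): a filesystem's
 * ->read_iter, after taking the inode lock and checking IS_DAX(),
 * would simply do
 *
 *	ret = dax_iomap_rw(iocb, to, &myfs_iomap_ops);
 *
 * with the write side doing the same under the exclusive i_rwsem, as
 * the lockdep assertions above encode.
 */
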
static int dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}

static int dax_iomap_pte_fault(struct vm_fault *vmf,
			       const struct iomap_ops *ops)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	sector_t sector;
	struct iomap iomap = { 0 };
	unsigned flags = IOMAP_FAULT;
	int error, major = 0;
	int vmf_ret = 0;
	void *entry;

	/*
	 * Check whether the offset isn't beyond the end of the file now.
	 * The caller is supposed to hold locks serializing us with truncate /
	 * punch hole so this is a reliable test.
	 */
	if (pos >= i_size_read(inode))
		return VM_FAULT_SIGBUS;

	if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
		flags |= IOMAP_WRITE;

	/*
	 * Note that we don't bother to use iomap_apply here: DAX requires
	 * the file system block size to be equal to the page size, which
	 * means that we never have to deal with more than a single extent
	 * here.
	 */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
	if (error)
		return dax_fault_return(error);
	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
		vmf_ret = dax_fault_return(-EIO);	/* fs corruption? */
		goto finish_iomap;
	}

	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
	if (IS_ERR(entry)) {
		vmf_ret = dax_fault_return(PTR_ERR(entry));
		goto finish_iomap;
	}

	sector = dax_iomap_sector(&iomap, pos);

	if (vmf->cow_page) {
		switch (iomap.type) {
		case IOMAP_HOLE:
		case IOMAP_UNWRITTEN:
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
			error = copy_user_dax(iomap.bdev, iomap.dax_dev,
					sector, PAGE_SIZE, vmf->cow_page, vaddr);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		}

		if (error)
			goto error_unlock_entry;

		__SetPageUptodate(vmf->cow_page);
		vmf_ret = finish_fault(vmf);
		if (!vmf_ret)
			vmf_ret = VM_FAULT_DONE_COW;
		goto unlock_entry;
	}

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomap.flags & IOMAP_F_NEW) {
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vmf->vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
		}
		error = dax_insert_mapping(mapping, iomap.bdev, iomap.dax_dev,
				sector, PAGE_SIZE, &entry, vmf->vma, vmf);
		/* -EBUSY is fine, somebody else faulted on the same PTE */
		if (error == -EBUSY)
			error = 0;
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (!(vmf->flags & FAULT_FLAG_WRITE)) {
			vmf_ret = dax_load_hole(mapping, &entry, vmf);
			goto unlock_entry;
		}
		/*FALLTHRU*/
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

 error_unlock_entry:
	vmf_ret = dax_fault_return(error) | major;
 unlock_entry:
	put_locked_mapping_entry(mapping, vmf->pgoff, entry);
 finish_iomap:
	if (ops->iomap_end) {
		int copied = PAGE_SIZE;

		if (vmf_ret & VM_FAULT_ERROR)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (another
		 * thread may already be happily using the PTE we have
		 * installed).  Just ignore the error from ->iomap_end since
		 * we cannot do much with it.
		 */
		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
	}
	return vmf_ret;
}

#ifdef CONFIG_FS_DAX_PMD
/*
 * The 'colour' (i.e. low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below functions.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)

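/*
 * With 4k pages and 2MiB PMDs, PG_PMD_COLOUR is 0x1ff: a pfn is
 * suitably aligned for a PMD mapping iff (pfn & PG_PMD_COLOUR) == 0,
 * and (pgoff | PG_PMD_COLOUR) is the last page index covered by the
 * PMD containing pgoff.
 */
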
static int dax_pmd_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
		loff_t pos, void **entryp)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	const sector_t sector = dax_iomap_sector(iomap, pos);
	struct dax_device *dax_dev = iomap->dax_dev;
	struct block_device *bdev = iomap->bdev;
	struct inode *inode = mapping->host;
	const size_t size = PMD_SIZE;
	void *ret = NULL, *kaddr;
	long length = 0;
	pgoff_t pgoff;
	pfn_t pfn;
	int id;

	if (bdev_dax_pgoff(bdev, sector, size, &pgoff) != 0)
		goto fallback;

	id = dax_read_lock();
	length = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
	if (length < 0)
		goto unlock_fallback;
	length = PFN_PHYS(length);

	if (length < size)
		goto unlock_fallback;
	if (pfn_t_to_pfn(pfn) & PG_PMD_COLOUR)
		goto unlock_fallback;
	if (!pfn_t_devmap(pfn))
		goto unlock_fallback;
	dax_read_unlock(id);

	ret = dax_insert_mapping_entry(mapping, vmf, *entryp, sector,
			RADIX_DAX_PMD);
	if (IS_ERR(ret))
		goto fallback;
	*entryp = ret;

	trace_dax_pmd_insert_mapping(inode, vmf, length, pfn, ret);
	return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
			pfn, vmf->flags & FAULT_FLAG_WRITE);

unlock_fallback:
	dax_read_unlock(id);
fallback:
	trace_dax_pmd_insert_mapping_fallback(inode, vmf, length, pfn, ret);
	return VM_FAULT_FALLBACK;
}

static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
		void **entryp)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct inode *inode = mapping->host;
	struct page *zero_page;
	void *ret = NULL;
	spinlock_t *ptl;
	pmd_t pmd_entry;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	ret = dax_insert_mapping_entry(mapping, vmf, *entryp, 0,
			RADIX_DAX_PMD | RADIX_DAX_HZP);
	if (IS_ERR(ret))
		goto fallback;
	*entryp = ret;

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		spin_unlock(ptl);
		goto fallback;
	}

	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
	return VM_FAULT_NOPAGE;

fallback:
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
	return VM_FAULT_FALLBACK;
}

static int dax_iomap_pmd_fault(struct vm_fault *vmf,
			       const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
	struct inode *inode = mapping->host;
	int result = VM_FAULT_FALLBACK;
	struct iomap iomap = { 0 };
	pgoff_t max_pgoff, pgoff;
	void *entry;
	loff_t pos;
	int error;

	/*
	 * Check whether the offset isn't beyond the end of the file now.
	 * The caller is supposed to hold locks serializing us with truncate /
	 * punch hole so this is a reliable test.
	 */
	pgoff = linear_page_index(vma, pmd_addr);
	max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		goto fallback;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		goto fallback;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		goto fallback;

	if (pgoff > max_pgoff) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}

	/* If the PMD would extend beyond the file size */
	if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
		goto fallback;

	/*
	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
	 * setting up a mapping, so really we're using iomap_begin() as a way
	 * to look up our filesystem block.
	 */
	pos = (loff_t)pgoff << PAGE_SHIFT;
	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
	if (error)
		goto fallback;

	if (iomap.offset + iomap.length < pos + PMD_SIZE)
		goto finish_iomap;

	/*
	 * grab_mapping_entry() will make sure we get a 2MiB empty entry, a
	 * DAX PMD entry or a HZP entry.  If it can't (because a 4k page is
	 * already in the tree, for instance), it will return -EEXIST and we
	 * just fall back to 4k entries.
	 */
	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
	if (IS_ERR(entry))
		goto finish_iomap;

	switch (iomap.type) {
	case IOMAP_MAPPED:
		result = dax_pmd_insert_mapping(vmf, &iomap, pos, &entry);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(write))
			goto unlock_entry;
		result = dax_pmd_load_hole(vmf, &iomap, &entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

 unlock_entry:
	put_locked_mapping_entry(mapping, pgoff, entry);
 finish_iomap:
	if (ops->iomap_end) {
		int copied = PMD_SIZE;

		if (result == VM_FAULT_FALLBACK)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (another
		 * thread may already be happily using the PMD we have
		 * installed).  Just ignore the error from ->iomap_end since
		 * we cannot do much with it.
		 */
		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
				&iomap);
	}
 fallback:
	if (result == VM_FAULT_FALLBACK) {
		split_huge_pmd(vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
	return result;
}
#else
static int dax_iomap_pmd_fault(struct vm_fault *vmf,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */

/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vmf: The description of the fault
 * @pe_size: Size of the page to fault in
 * @ops: iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in
 * their fault handler for DAX files.  dax_iomap_fault() assumes the caller
 * has done all the necessary locking for the page fault to proceed
 * successfully.
 */
int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    const struct iomap_ops *ops)
{
	switch (pe_size) {
	case PE_SIZE_PTE:
		return dax_iomap_pte_fault(vmf, ops);
	case PE_SIZE_PMD:
		return dax_iomap_pmd_fault(vmf, ops);
	default:
		return VM_FAULT_FALLBACK;
	}
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);
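
/*
 * Illustrative sketch, not part of this file (the "myfs" names are
 * hypothetical): a filesystem's ->fault/->huge_fault handlers take a
 * lock serializing against truncate and then dispatch here:
 *
 *	static int myfs_dax_huge_fault(struct vm_fault *vmf,
 *			enum page_entry_size pe_size)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		int ret;
 *
 *		down_read(&MYFS_I(inode)->i_mmap_sem);
 *		ret = dax_iomap_fault(vmf, pe_size, &myfs_iomap_ops);
 *		up_read(&MYFS_I(inode)->i_mmap_sem);
 *		return ret;
 *	}
 */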