xref: /linux/mm/huge_memory.c (revision 3f07c0144132e4f59d88055ac8ff3e691a5fa2b8)
1 /*
2  *  Copyright (C) 2009  Red Hat, Inc.
3  *
4  *  This work is licensed under the terms of the GNU GPL, version 2. See
5  *  the COPYING file in the top-level directory.
6  */
7 
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 
10 #include <linux/mm.h>
11 #include <linux/sched.h>
12 #include <linux/sched/coredump.h>
13 #include <linux/highmem.h>
14 #include <linux/hugetlb.h>
15 #include <linux/mmu_notifier.h>
16 #include <linux/rmap.h>
17 #include <linux/swap.h>
18 #include <linux/shrinker.h>
19 #include <linux/mm_inline.h>
20 #include <linux/swapops.h>
21 #include <linux/dax.h>
22 #include <linux/khugepaged.h>
23 #include <linux/freezer.h>
24 #include <linux/pfn_t.h>
25 #include <linux/mman.h>
26 #include <linux/memremap.h>
27 #include <linux/pagemap.h>
28 #include <linux/debugfs.h>
29 #include <linux/migrate.h>
30 #include <linux/hashtable.h>
31 #include <linux/userfaultfd_k.h>
32 #include <linux/page_idle.h>
33 #include <linux/shmem_fs.h>
34 
35 #include <asm/tlb.h>
36 #include <asm/pgalloc.h>
37 #include "internal.h"
38 
39 /*
40  * By default transparent hugepage support is disabled in order that avoid
41  * to risk increase the memory footprint of applications without a guaranteed
42  * benefit. When transparent hugepage support is enabled, is for all mappings,
43  * and khugepaged scans all mappings.
44  * Defrag is invoked by khugepaged hugepage allocations and by page faults
45  * for all hugepage allocations.
46  */
47 unsigned long transparent_hugepage_flags __read_mostly =
48 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
49 	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
50 #endif
51 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
52 	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
53 #endif
54 	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
55 	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
56 	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
57 
58 static struct shrinker deferred_split_shrinker;
59 
60 static atomic_t huge_zero_refcount;
61 struct page *huge_zero_page __read_mostly;
62 
63 static struct page *get_huge_zero_page(void)
64 {
65 	struct page *zero_page;
66 retry:
67 	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
68 		return READ_ONCE(huge_zero_page);
69 
70 	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
71 			HPAGE_PMD_ORDER);
72 	if (!zero_page) {
73 		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
74 		return NULL;
75 	}
76 	count_vm_event(THP_ZERO_PAGE_ALLOC);
77 	preempt_disable();
78 	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
79 		preempt_enable();
80 		__free_pages(zero_page, compound_order(zero_page));
81 		goto retry;
82 	}
83 
84 	/* We take additional reference here. It will be put back by shrinker */
85 	atomic_set(&huge_zero_refcount, 2);
86 	preempt_enable();
87 	return READ_ONCE(huge_zero_page);
88 }
89 
90 static void put_huge_zero_page(void)
91 {
92 	/*
93 	 * Counter should never go to zero here. Only shrinker can put
94 	 * last reference.
95 	 */
96 	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
97 }
98 
99 struct page *mm_get_huge_zero_page(struct mm_struct *mm)
100 {
101 	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
102 		return READ_ONCE(huge_zero_page);
103 
104 	if (!get_huge_zero_page())
105 		return NULL;
106 
107 	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
108 		put_huge_zero_page();
109 
110 	return READ_ONCE(huge_zero_page);
111 }
112 
113 void mm_put_huge_zero_page(struct mm_struct *mm)
114 {
115 	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
116 		put_huge_zero_page();
117 }
118 
119 static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
120 					struct shrink_control *sc)
121 {
122 	/* we can free zero page only if last reference remains */
123 	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
124 }
125 
126 static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
127 				       struct shrink_control *sc)
128 {
129 	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
130 		struct page *zero_page = xchg(&huge_zero_page, NULL);
131 		BUG_ON(zero_page == NULL);
132 		__free_pages(zero_page, compound_order(zero_page));
133 		return HPAGE_PMD_NR;
134 	}
135 
136 	return 0;
137 }
138 
139 static struct shrinker huge_zero_page_shrinker = {
140 	.count_objects = shrink_huge_zero_page_count,
141 	.scan_objects = shrink_huge_zero_page_scan,
142 	.seeks = DEFAULT_SEEKS,
143 };
144 
145 #ifdef CONFIG_SYSFS
146 static ssize_t enabled_show(struct kobject *kobj,
147 			    struct kobj_attribute *attr, char *buf)
148 {
149 	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
150 		return sprintf(buf, "[always] madvise never\n");
151 	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags))
152 		return sprintf(buf, "always [madvise] never\n");
153 	else
154 		return sprintf(buf, "always madvise [never]\n");
155 }
156 
157 static ssize_t enabled_store(struct kobject *kobj,
158 			     struct kobj_attribute *attr,
159 			     const char *buf, size_t count)
160 {
161 	ssize_t ret = count;
162 
163 	if (!memcmp("always", buf,
164 		    min(sizeof("always")-1, count))) {
165 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
166 		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
167 	} else if (!memcmp("madvise", buf,
168 			   min(sizeof("madvise")-1, count))) {
169 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
170 		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
171 	} else if (!memcmp("never", buf,
172 			   min(sizeof("never")-1, count))) {
173 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
174 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
175 	} else
176 		ret = -EINVAL;
177 
178 	if (ret > 0) {
179 		int err = start_stop_khugepaged();
180 		if (err)
181 			ret = err;
182 	}
183 	return ret;
184 }
185 static struct kobj_attribute enabled_attr =
186 	__ATTR(enabled, 0644, enabled_show, enabled_store);
187 
188 ssize_t single_hugepage_flag_show(struct kobject *kobj,
189 				struct kobj_attribute *attr, char *buf,
190 				enum transparent_hugepage_flag flag)
191 {
192 	return sprintf(buf, "%d\n",
193 		       !!test_bit(flag, &transparent_hugepage_flags));
194 }
195 
196 ssize_t single_hugepage_flag_store(struct kobject *kobj,
197 				 struct kobj_attribute *attr,
198 				 const char *buf, size_t count,
199 				 enum transparent_hugepage_flag flag)
200 {
201 	unsigned long value;
202 	int ret;
203 
204 	ret = kstrtoul(buf, 10, &value);
205 	if (ret < 0)
206 		return ret;
207 	if (value > 1)
208 		return -EINVAL;
209 
210 	if (value)
211 		set_bit(flag, &transparent_hugepage_flags);
212 	else
213 		clear_bit(flag, &transparent_hugepage_flags);
214 
215 	return count;
216 }
217 
218 static ssize_t defrag_show(struct kobject *kobj,
219 			   struct kobj_attribute *attr, char *buf)
220 {
221 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
222 		return sprintf(buf, "[always] defer defer+madvise madvise never\n");
223 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
224 		return sprintf(buf, "always [defer] defer+madvise madvise never\n");
225 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
226 		return sprintf(buf, "always defer [defer+madvise] madvise never\n");
227 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
228 		return sprintf(buf, "always defer defer+madvise [madvise] never\n");
229 	return sprintf(buf, "always defer defer+madvise madvise [never]\n");
230 }
231 
232 static ssize_t defrag_store(struct kobject *kobj,
233 			    struct kobj_attribute *attr,
234 			    const char *buf, size_t count)
235 {
236 	if (!memcmp("always", buf,
237 		    min(sizeof("always")-1, count))) {
238 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
239 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
240 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
241 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
242 	} else if (!memcmp("defer", buf,
243 		    min(sizeof("defer")-1, count))) {
244 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
245 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
246 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
247 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
248 	} else if (!memcmp("defer+madvise", buf,
249 		    min(sizeof("defer+madvise")-1, count))) {
250 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
251 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
252 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
253 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
254 	} else if (!memcmp("madvise", buf,
255 			   min(sizeof("madvise")-1, count))) {
256 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
257 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
258 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
259 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
260 	} else if (!memcmp("never", buf,
261 			   min(sizeof("never")-1, count))) {
262 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
263 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
264 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
265 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
266 	} else
267 		return -EINVAL;
268 
269 	return count;
270 }
271 static struct kobj_attribute defrag_attr =
272 	__ATTR(defrag, 0644, defrag_show, defrag_store);
273 
274 static ssize_t use_zero_page_show(struct kobject *kobj,
275 		struct kobj_attribute *attr, char *buf)
276 {
277 	return single_hugepage_flag_show(kobj, attr, buf,
278 				TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
279 }
280 static ssize_t use_zero_page_store(struct kobject *kobj,
281 		struct kobj_attribute *attr, const char *buf, size_t count)
282 {
283 	return single_hugepage_flag_store(kobj, attr, buf, count,
284 				 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
285 }
286 static struct kobj_attribute use_zero_page_attr =
287 	__ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);
288 
289 static ssize_t hpage_pmd_size_show(struct kobject *kobj,
290 		struct kobj_attribute *attr, char *buf)
291 {
292 	return sprintf(buf, "%lu\n", HPAGE_PMD_SIZE);
293 }
294 static struct kobj_attribute hpage_pmd_size_attr =
295 	__ATTR_RO(hpage_pmd_size);
296 
297 #ifdef CONFIG_DEBUG_VM
298 static ssize_t debug_cow_show(struct kobject *kobj,
299 				struct kobj_attribute *attr, char *buf)
300 {
301 	return single_hugepage_flag_show(kobj, attr, buf,
302 				TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
303 }
304 static ssize_t debug_cow_store(struct kobject *kobj,
305 			       struct kobj_attribute *attr,
306 			       const char *buf, size_t count)
307 {
308 	return single_hugepage_flag_store(kobj, attr, buf, count,
309 				 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
310 }
311 static struct kobj_attribute debug_cow_attr =
312 	__ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
313 #endif /* CONFIG_DEBUG_VM */
314 
315 static struct attribute *hugepage_attr[] = {
316 	&enabled_attr.attr,
317 	&defrag_attr.attr,
318 	&use_zero_page_attr.attr,
319 	&hpage_pmd_size_attr.attr,
320 #if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
321 	&shmem_enabled_attr.attr,
322 #endif
323 #ifdef CONFIG_DEBUG_VM
324 	&debug_cow_attr.attr,
325 #endif
326 	NULL,
327 };
328 
329 static struct attribute_group hugepage_attr_group = {
330 	.attrs = hugepage_attr,
331 };
332 
333 static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
334 {
335 	int err;
336 
337 	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
338 	if (unlikely(!*hugepage_kobj)) {
339 		pr_err("failed to create transparent hugepage kobject\n");
340 		return -ENOMEM;
341 	}
342 
343 	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
344 	if (err) {
345 		pr_err("failed to register transparent hugepage group\n");
346 		goto delete_obj;
347 	}
348 
349 	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
350 	if (err) {
351 		pr_err("failed to register transparent hugepage group\n");
352 		goto remove_hp_group;
353 	}
354 
355 	return 0;
356 
357 remove_hp_group:
358 	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
359 delete_obj:
360 	kobject_put(*hugepage_kobj);
361 	return err;
362 }
363 
364 static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
365 {
366 	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
367 	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
368 	kobject_put(hugepage_kobj);
369 }
370 #else
371 static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
372 {
373 	return 0;
374 }
375 
376 static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
377 {
378 }
379 #endif /* CONFIG_SYSFS */
380 
381 static int __init hugepage_init(void)
382 {
383 	int err;
384 	struct kobject *hugepage_kobj;
385 
386 	if (!has_transparent_hugepage()) {
387 		transparent_hugepage_flags = 0;
388 		return -EINVAL;
389 	}
390 
391 	/*
392 	 * hugepages can't be allocated by the buddy allocator
393 	 */
394 	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER >= MAX_ORDER);
395 	/*
396 	 * we use page->mapping and page->index in second tail page
397 	 * as list_head: assuming THP order >= 2
398 	 */
399 	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);
400 
401 	err = hugepage_init_sysfs(&hugepage_kobj);
402 	if (err)
403 		goto err_sysfs;
404 
405 	err = khugepaged_init();
406 	if (err)
407 		goto err_slab;
408 
409 	err = register_shrinker(&huge_zero_page_shrinker);
410 	if (err)
411 		goto err_hzp_shrinker;
412 	err = register_shrinker(&deferred_split_shrinker);
413 	if (err)
414 		goto err_split_shrinker;
415 
416 	/*
417 	 * By default disable transparent hugepages on smaller systems,
418 	 * where the extra memory used could hurt more than TLB overhead
419 	 * is likely to save.  The admin can still enable it through /sys.
420 	 */
421 	if (totalram_pages < (512 << (20 - PAGE_SHIFT))) {
422 		transparent_hugepage_flags = 0;
423 		return 0;
424 	}
425 
426 	err = start_stop_khugepaged();
427 	if (err)
428 		goto err_khugepaged;
429 
430 	return 0;
431 err_khugepaged:
432 	unregister_shrinker(&deferred_split_shrinker);
433 err_split_shrinker:
434 	unregister_shrinker(&huge_zero_page_shrinker);
435 err_hzp_shrinker:
436 	khugepaged_destroy();
437 err_slab:
438 	hugepage_exit_sysfs(hugepage_kobj);
439 err_sysfs:
440 	return err;
441 }
442 subsys_initcall(hugepage_init);
443 
444 static int __init setup_transparent_hugepage(char *str)
445 {
446 	int ret = 0;
447 	if (!str)
448 		goto out;
449 	if (!strcmp(str, "always")) {
450 		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
451 			&transparent_hugepage_flags);
452 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
453 			  &transparent_hugepage_flags);
454 		ret = 1;
455 	} else if (!strcmp(str, "madvise")) {
456 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
457 			  &transparent_hugepage_flags);
458 		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
459 			&transparent_hugepage_flags);
460 		ret = 1;
461 	} else if (!strcmp(str, "never")) {
462 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
463 			  &transparent_hugepage_flags);
464 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
465 			  &transparent_hugepage_flags);
466 		ret = 1;
467 	}
468 out:
469 	if (!ret)
470 		pr_warn("transparent_hugepage= cannot parse, ignored\n");
471 	return ret;
472 }
473 __setup("transparent_hugepage=", setup_transparent_hugepage);
474 
475 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
476 {
477 	if (likely(vma->vm_flags & VM_WRITE))
478 		pmd = pmd_mkwrite(pmd);
479 	return pmd;
480 }
481 
482 static inline struct list_head *page_deferred_list(struct page *page)
483 {
484 	/*
485 	 * ->lru in the tail pages is occupied by compound_head.
486 	 * Let's use ->mapping + ->index in the second tail page as list_head.
487 	 */
488 	return (struct list_head *)&page[2].mapping;
489 }
490 
491 void prep_transhuge_page(struct page *page)
492 {
493 	/*
494 	 * we use page->mapping and page->indexlru in second tail page
495 	 * as list_head: assuming THP order >= 2
496 	 */
497 
498 	INIT_LIST_HEAD(page_deferred_list(page));
499 	set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
500 }
501 
502 unsigned long __thp_get_unmapped_area(struct file *filp, unsigned long len,
503 		loff_t off, unsigned long flags, unsigned long size)
504 {
505 	unsigned long addr;
506 	loff_t off_end = off + len;
507 	loff_t off_align = round_up(off, size);
508 	unsigned long len_pad;
509 
510 	if (off_end <= off_align || (off_end - off_align) < size)
511 		return 0;
512 
513 	len_pad = len + size;
514 	if (len_pad < len || (off + len_pad) < off)
515 		return 0;
516 
517 	addr = current->mm->get_unmapped_area(filp, 0, len_pad,
518 					      off >> PAGE_SHIFT, flags);
519 	if (IS_ERR_VALUE(addr))
520 		return 0;
521 
522 	addr += (off - addr) & (size - 1);
523 	return addr;
524 }
525 
526 unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
527 		unsigned long len, unsigned long pgoff, unsigned long flags)
528 {
529 	loff_t off = (loff_t)pgoff << PAGE_SHIFT;
530 
531 	if (addr)
532 		goto out;
533 	if (!IS_DAX(filp->f_mapping->host) || !IS_ENABLED(CONFIG_FS_DAX_PMD))
534 		goto out;
535 
536 	addr = __thp_get_unmapped_area(filp, len, off, flags, PMD_SIZE);
537 	if (addr)
538 		return addr;
539 
540  out:
541 	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
542 }
543 EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
544 
545 static int __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page,
546 		gfp_t gfp)
547 {
548 	struct vm_area_struct *vma = vmf->vma;
549 	struct mem_cgroup *memcg;
550 	pgtable_t pgtable;
551 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
552 
553 	VM_BUG_ON_PAGE(!PageCompound(page), page);
554 
555 	if (mem_cgroup_try_charge(page, vma->vm_mm, gfp, &memcg, true)) {
556 		put_page(page);
557 		count_vm_event(THP_FAULT_FALLBACK);
558 		return VM_FAULT_FALLBACK;
559 	}
560 
561 	pgtable = pte_alloc_one(vma->vm_mm, haddr);
562 	if (unlikely(!pgtable)) {
563 		mem_cgroup_cancel_charge(page, memcg, true);
564 		put_page(page);
565 		return VM_FAULT_OOM;
566 	}
567 
568 	clear_huge_page(page, haddr, HPAGE_PMD_NR);
569 	/*
570 	 * The memory barrier inside __SetPageUptodate makes sure that
571 	 * clear_huge_page writes become visible before the set_pmd_at()
572 	 * write.
573 	 */
574 	__SetPageUptodate(page);
575 
576 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
577 	if (unlikely(!pmd_none(*vmf->pmd))) {
578 		spin_unlock(vmf->ptl);
579 		mem_cgroup_cancel_charge(page, memcg, true);
580 		put_page(page);
581 		pte_free(vma->vm_mm, pgtable);
582 	} else {
583 		pmd_t entry;
584 
585 		/* Deliver the page fault to userland */
586 		if (userfaultfd_missing(vma)) {
587 			int ret;
588 
589 			spin_unlock(vmf->ptl);
590 			mem_cgroup_cancel_charge(page, memcg, true);
591 			put_page(page);
592 			pte_free(vma->vm_mm, pgtable);
593 			ret = handle_userfault(vmf, VM_UFFD_MISSING);
594 			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
595 			return ret;
596 		}
597 
598 		entry = mk_huge_pmd(page, vma->vm_page_prot);
599 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
600 		page_add_new_anon_rmap(page, vma, haddr, true);
601 		mem_cgroup_commit_charge(page, memcg, false, true);
602 		lru_cache_add_active_or_unevictable(page, vma);
603 		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
604 		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
605 		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
606 		atomic_long_inc(&vma->vm_mm->nr_ptes);
607 		spin_unlock(vmf->ptl);
608 		count_vm_event(THP_FAULT_ALLOC);
609 	}
610 
611 	return 0;
612 }
613 
614 /*
615  * always: directly stall for all thp allocations
616  * defer: wake kswapd and fail if not immediately available
617  * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
618  *		  fail if not immediately available
619  * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
620  *	    available
621  * never: never stall for any thp allocation
622  */
623 static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)
624 {
625 	const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);
626 
627 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
628 		return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
629 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
630 		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
631 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
632 		return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
633 							     __GFP_KSWAPD_RECLAIM);
634 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
635 		return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
636 							     0);
637 	return GFP_TRANSHUGE_LIGHT;
638 }
639 
640 /* Caller must hold page table lock. */
641 static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
642 		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
643 		struct page *zero_page)
644 {
645 	pmd_t entry;
646 	if (!pmd_none(*pmd))
647 		return false;
648 	entry = mk_pmd(zero_page, vma->vm_page_prot);
649 	entry = pmd_mkhuge(entry);
650 	if (pgtable)
651 		pgtable_trans_huge_deposit(mm, pmd, pgtable);
652 	set_pmd_at(mm, haddr, pmd, entry);
653 	atomic_long_inc(&mm->nr_ptes);
654 	return true;
655 }
656 
657 int do_huge_pmd_anonymous_page(struct vm_fault *vmf)
658 {
659 	struct vm_area_struct *vma = vmf->vma;
660 	gfp_t gfp;
661 	struct page *page;
662 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
663 
664 	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
665 		return VM_FAULT_FALLBACK;
666 	if (unlikely(anon_vma_prepare(vma)))
667 		return VM_FAULT_OOM;
668 	if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
669 		return VM_FAULT_OOM;
670 	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
671 			!mm_forbids_zeropage(vma->vm_mm) &&
672 			transparent_hugepage_use_zero_page()) {
673 		pgtable_t pgtable;
674 		struct page *zero_page;
675 		bool set;
676 		int ret;
677 		pgtable = pte_alloc_one(vma->vm_mm, haddr);
678 		if (unlikely(!pgtable))
679 			return VM_FAULT_OOM;
680 		zero_page = mm_get_huge_zero_page(vma->vm_mm);
681 		if (unlikely(!zero_page)) {
682 			pte_free(vma->vm_mm, pgtable);
683 			count_vm_event(THP_FAULT_FALLBACK);
684 			return VM_FAULT_FALLBACK;
685 		}
686 		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
687 		ret = 0;
688 		set = false;
689 		if (pmd_none(*vmf->pmd)) {
690 			if (userfaultfd_missing(vma)) {
691 				spin_unlock(vmf->ptl);
692 				ret = handle_userfault(vmf, VM_UFFD_MISSING);
693 				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
694 			} else {
695 				set_huge_zero_page(pgtable, vma->vm_mm, vma,
696 						   haddr, vmf->pmd, zero_page);
697 				spin_unlock(vmf->ptl);
698 				set = true;
699 			}
700 		} else
701 			spin_unlock(vmf->ptl);
702 		if (!set)
703 			pte_free(vma->vm_mm, pgtable);
704 		return ret;
705 	}
706 	gfp = alloc_hugepage_direct_gfpmask(vma);
707 	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
708 	if (unlikely(!page)) {
709 		count_vm_event(THP_FAULT_FALLBACK);
710 		return VM_FAULT_FALLBACK;
711 	}
712 	prep_transhuge_page(page);
713 	return __do_huge_pmd_anonymous_page(vmf, page, gfp);
714 }
715 
716 static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
717 		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write)
718 {
719 	struct mm_struct *mm = vma->vm_mm;
720 	pmd_t entry;
721 	spinlock_t *ptl;
722 
723 	ptl = pmd_lock(mm, pmd);
724 	entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
725 	if (pfn_t_devmap(pfn))
726 		entry = pmd_mkdevmap(entry);
727 	if (write) {
728 		entry = pmd_mkyoung(pmd_mkdirty(entry));
729 		entry = maybe_pmd_mkwrite(entry, vma);
730 	}
731 	set_pmd_at(mm, addr, pmd, entry);
732 	update_mmu_cache_pmd(vma, addr, pmd);
733 	spin_unlock(ptl);
734 }
735 
736 int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
737 			pmd_t *pmd, pfn_t pfn, bool write)
738 {
739 	pgprot_t pgprot = vma->vm_page_prot;
740 	/*
741 	 * If we had pmd_special, we could avoid all these restrictions,
742 	 * but we need to be consistent with PTEs and architectures that
743 	 * can't support a 'special' bit.
744 	 */
745 	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
746 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
747 						(VM_PFNMAP|VM_MIXEDMAP));
748 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
749 	BUG_ON(!pfn_t_devmap(pfn));
750 
751 	if (addr < vma->vm_start || addr >= vma->vm_end)
752 		return VM_FAULT_SIGBUS;
753 
754 	track_pfn_insert(vma, &pgprot, pfn);
755 
756 	insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write);
757 	return VM_FAULT_NOPAGE;
758 }
759 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
760 
761 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
762 static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
763 {
764 	if (likely(vma->vm_flags & VM_WRITE))
765 		pud = pud_mkwrite(pud);
766 	return pud;
767 }
768 
769 static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
770 		pud_t *pud, pfn_t pfn, pgprot_t prot, bool write)
771 {
772 	struct mm_struct *mm = vma->vm_mm;
773 	pud_t entry;
774 	spinlock_t *ptl;
775 
776 	ptl = pud_lock(mm, pud);
777 	entry = pud_mkhuge(pfn_t_pud(pfn, prot));
778 	if (pfn_t_devmap(pfn))
779 		entry = pud_mkdevmap(entry);
780 	if (write) {
781 		entry = pud_mkyoung(pud_mkdirty(entry));
782 		entry = maybe_pud_mkwrite(entry, vma);
783 	}
784 	set_pud_at(mm, addr, pud, entry);
785 	update_mmu_cache_pud(vma, addr, pud);
786 	spin_unlock(ptl);
787 }
788 
789 int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
790 			pud_t *pud, pfn_t pfn, bool write)
791 {
792 	pgprot_t pgprot = vma->vm_page_prot;
793 	/*
794 	 * If we had pud_special, we could avoid all these restrictions,
795 	 * but we need to be consistent with PTEs and architectures that
796 	 * can't support a 'special' bit.
797 	 */
798 	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
799 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
800 						(VM_PFNMAP|VM_MIXEDMAP));
801 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
802 	BUG_ON(!pfn_t_devmap(pfn));
803 
804 	if (addr < vma->vm_start || addr >= vma->vm_end)
805 		return VM_FAULT_SIGBUS;
806 
807 	track_pfn_insert(vma, &pgprot, pfn);
808 
809 	insert_pfn_pud(vma, addr, pud, pfn, pgprot, write);
810 	return VM_FAULT_NOPAGE;
811 }
812 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
813 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
814 
815 static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
816 		pmd_t *pmd)
817 {
818 	pmd_t _pmd;
819 
820 	/*
821 	 * We should set the dirty bit only for FOLL_WRITE but for now
822 	 * the dirty bit in the pmd is meaningless.  And if the dirty
823 	 * bit will become meaningful and we'll only set it with
824 	 * FOLL_WRITE, an atomic set_bit will be required on the pmd to
825 	 * set the young bit, instead of the current set_pmd_at.
826 	 */
827 	_pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
828 	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
829 				pmd, _pmd,  1))
830 		update_mmu_cache_pmd(vma, addr, pmd);
831 }
832 
833 struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
834 		pmd_t *pmd, int flags)
835 {
836 	unsigned long pfn = pmd_pfn(*pmd);
837 	struct mm_struct *mm = vma->vm_mm;
838 	struct dev_pagemap *pgmap;
839 	struct page *page;
840 
841 	assert_spin_locked(pmd_lockptr(mm, pmd));
842 
843 	/*
844 	 * When we COW a devmap PMD entry, we split it into PTEs, so we should
845 	 * not be in this function with `flags & FOLL_COW` set.
846 	 */
847 	WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");
848 
849 	if (flags & FOLL_WRITE && !pmd_write(*pmd))
850 		return NULL;
851 
852 	if (pmd_present(*pmd) && pmd_devmap(*pmd))
853 		/* pass */;
854 	else
855 		return NULL;
856 
857 	if (flags & FOLL_TOUCH)
858 		touch_pmd(vma, addr, pmd);
859 
860 	/*
861 	 * device mapped pages can only be returned if the
862 	 * caller will manage the page reference count.
863 	 */
864 	if (!(flags & FOLL_GET))
865 		return ERR_PTR(-EEXIST);
866 
867 	pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
868 	pgmap = get_dev_pagemap(pfn, NULL);
869 	if (!pgmap)
870 		return ERR_PTR(-EFAULT);
871 	page = pfn_to_page(pfn);
872 	get_page(page);
873 	put_dev_pagemap(pgmap);
874 
875 	return page;
876 }
877 
878 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
879 		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
880 		  struct vm_area_struct *vma)
881 {
882 	spinlock_t *dst_ptl, *src_ptl;
883 	struct page *src_page;
884 	pmd_t pmd;
885 	pgtable_t pgtable = NULL;
886 	int ret = -ENOMEM;
887 
888 	/* Skip if can be re-fill on fault */
889 	if (!vma_is_anonymous(vma))
890 		return 0;
891 
892 	pgtable = pte_alloc_one(dst_mm, addr);
893 	if (unlikely(!pgtable))
894 		goto out;
895 
896 	dst_ptl = pmd_lock(dst_mm, dst_pmd);
897 	src_ptl = pmd_lockptr(src_mm, src_pmd);
898 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
899 
900 	ret = -EAGAIN;
901 	pmd = *src_pmd;
902 	if (unlikely(!pmd_trans_huge(pmd))) {
903 		pte_free(dst_mm, pgtable);
904 		goto out_unlock;
905 	}
906 	/*
907 	 * When page table lock is held, the huge zero pmd should not be
908 	 * under splitting since we don't split the page itself, only pmd to
909 	 * a page table.
910 	 */
911 	if (is_huge_zero_pmd(pmd)) {
912 		struct page *zero_page;
913 		/*
914 		 * get_huge_zero_page() will never allocate a new page here,
915 		 * since we already have a zero page to copy. It just takes a
916 		 * reference.
917 		 */
918 		zero_page = mm_get_huge_zero_page(dst_mm);
919 		set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
920 				zero_page);
921 		ret = 0;
922 		goto out_unlock;
923 	}
924 
925 	src_page = pmd_page(pmd);
926 	VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
927 	get_page(src_page);
928 	page_dup_rmap(src_page, true);
929 	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
930 	atomic_long_inc(&dst_mm->nr_ptes);
931 	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
932 
933 	pmdp_set_wrprotect(src_mm, addr, src_pmd);
934 	pmd = pmd_mkold(pmd_wrprotect(pmd));
935 	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
936 
937 	ret = 0;
938 out_unlock:
939 	spin_unlock(src_ptl);
940 	spin_unlock(dst_ptl);
941 out:
942 	return ret;
943 }
944 
945 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
946 static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
947 		pud_t *pud)
948 {
949 	pud_t _pud;
950 
951 	/*
952 	 * We should set the dirty bit only for FOLL_WRITE but for now
953 	 * the dirty bit in the pud is meaningless.  And if the dirty
954 	 * bit will become meaningful and we'll only set it with
955 	 * FOLL_WRITE, an atomic set_bit will be required on the pud to
956 	 * set the young bit, instead of the current set_pud_at.
957 	 */
958 	_pud = pud_mkyoung(pud_mkdirty(*pud));
959 	if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
960 				pud, _pud,  1))
961 		update_mmu_cache_pud(vma, addr, pud);
962 }
963 
964 struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
965 		pud_t *pud, int flags)
966 {
967 	unsigned long pfn = pud_pfn(*pud);
968 	struct mm_struct *mm = vma->vm_mm;
969 	struct dev_pagemap *pgmap;
970 	struct page *page;
971 
972 	assert_spin_locked(pud_lockptr(mm, pud));
973 
974 	if (flags & FOLL_WRITE && !pud_write(*pud))
975 		return NULL;
976 
977 	if (pud_present(*pud) && pud_devmap(*pud))
978 		/* pass */;
979 	else
980 		return NULL;
981 
982 	if (flags & FOLL_TOUCH)
983 		touch_pud(vma, addr, pud);
984 
985 	/*
986 	 * device mapped pages can only be returned if the
987 	 * caller will manage the page reference count.
988 	 */
989 	if (!(flags & FOLL_GET))
990 		return ERR_PTR(-EEXIST);
991 
992 	pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
993 	pgmap = get_dev_pagemap(pfn, NULL);
994 	if (!pgmap)
995 		return ERR_PTR(-EFAULT);
996 	page = pfn_to_page(pfn);
997 	get_page(page);
998 	put_dev_pagemap(pgmap);
999 
1000 	return page;
1001 }
1002 
1003 int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1004 		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
1005 		  struct vm_area_struct *vma)
1006 {
1007 	spinlock_t *dst_ptl, *src_ptl;
1008 	pud_t pud;
1009 	int ret;
1010 
1011 	dst_ptl = pud_lock(dst_mm, dst_pud);
1012 	src_ptl = pud_lockptr(src_mm, src_pud);
1013 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1014 
1015 	ret = -EAGAIN;
1016 	pud = *src_pud;
1017 	if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
1018 		goto out_unlock;
1019 
1020 	/*
1021 	 * When page table lock is held, the huge zero pud should not be
1022 	 * under splitting since we don't split the page itself, only pud to
1023 	 * a page table.
1024 	 */
1025 	if (is_huge_zero_pud(pud)) {
1026 		/* No huge zero pud yet */
1027 	}
1028 
1029 	pudp_set_wrprotect(src_mm, addr, src_pud);
1030 	pud = pud_mkold(pud_wrprotect(pud));
1031 	set_pud_at(dst_mm, addr, dst_pud, pud);
1032 
1033 	ret = 0;
1034 out_unlock:
1035 	spin_unlock(src_ptl);
1036 	spin_unlock(dst_ptl);
1037 	return ret;
1038 }
1039 
1040 void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
1041 {
1042 	pud_t entry;
1043 	unsigned long haddr;
1044 	bool write = vmf->flags & FAULT_FLAG_WRITE;
1045 
1046 	vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
1047 	if (unlikely(!pud_same(*vmf->pud, orig_pud)))
1048 		goto unlock;
1049 
1050 	entry = pud_mkyoung(orig_pud);
1051 	if (write)
1052 		entry = pud_mkdirty(entry);
1053 	haddr = vmf->address & HPAGE_PUD_MASK;
1054 	if (pudp_set_access_flags(vmf->vma, haddr, vmf->pud, entry, write))
1055 		update_mmu_cache_pud(vmf->vma, vmf->address, vmf->pud);
1056 
1057 unlock:
1058 	spin_unlock(vmf->ptl);
1059 }
1060 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1061 
1062 void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd)
1063 {
1064 	pmd_t entry;
1065 	unsigned long haddr;
1066 	bool write = vmf->flags & FAULT_FLAG_WRITE;
1067 
1068 	vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1069 	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
1070 		goto unlock;
1071 
1072 	entry = pmd_mkyoung(orig_pmd);
1073 	if (write)
1074 		entry = pmd_mkdirty(entry);
1075 	haddr = vmf->address & HPAGE_PMD_MASK;
1076 	if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write))
1077 		update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd);
1078 
1079 unlock:
1080 	spin_unlock(vmf->ptl);
1081 }
1082 
1083 static int do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, pmd_t orig_pmd,
1084 		struct page *page)
1085 {
1086 	struct vm_area_struct *vma = vmf->vma;
1087 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1088 	struct mem_cgroup *memcg;
1089 	pgtable_t pgtable;
1090 	pmd_t _pmd;
1091 	int ret = 0, i;
1092 	struct page **pages;
1093 	unsigned long mmun_start;	/* For mmu_notifiers */
1094 	unsigned long mmun_end;		/* For mmu_notifiers */
1095 
1096 	pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
1097 			GFP_KERNEL);
1098 	if (unlikely(!pages)) {
1099 		ret |= VM_FAULT_OOM;
1100 		goto out;
1101 	}
1102 
1103 	for (i = 0; i < HPAGE_PMD_NR; i++) {
1104 		pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, vma,
1105 					       vmf->address, page_to_nid(page));
1106 		if (unlikely(!pages[i] ||
1107 			     mem_cgroup_try_charge(pages[i], vma->vm_mm,
1108 				     GFP_KERNEL, &memcg, false))) {
1109 			if (pages[i])
1110 				put_page(pages[i]);
1111 			while (--i >= 0) {
1112 				memcg = (void *)page_private(pages[i]);
1113 				set_page_private(pages[i], 0);
1114 				mem_cgroup_cancel_charge(pages[i], memcg,
1115 						false);
1116 				put_page(pages[i]);
1117 			}
1118 			kfree(pages);
1119 			ret |= VM_FAULT_OOM;
1120 			goto out;
1121 		}
1122 		set_page_private(pages[i], (unsigned long)memcg);
1123 	}
1124 
1125 	for (i = 0; i < HPAGE_PMD_NR; i++) {
1126 		copy_user_highpage(pages[i], page + i,
1127 				   haddr + PAGE_SIZE * i, vma);
1128 		__SetPageUptodate(pages[i]);
1129 		cond_resched();
1130 	}
1131 
1132 	mmun_start = haddr;
1133 	mmun_end   = haddr + HPAGE_PMD_SIZE;
1134 	mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);
1135 
1136 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1137 	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
1138 		goto out_free_pages;
1139 	VM_BUG_ON_PAGE(!PageHead(page), page);
1140 
1141 	pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
1142 	/* leave pmd empty until pte is filled */
1143 
1144 	pgtable = pgtable_trans_huge_withdraw(vma->vm_mm, vmf->pmd);
1145 	pmd_populate(vma->vm_mm, &_pmd, pgtable);
1146 
1147 	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
1148 		pte_t entry;
1149 		entry = mk_pte(pages[i], vma->vm_page_prot);
1150 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1151 		memcg = (void *)page_private(pages[i]);
1152 		set_page_private(pages[i], 0);
1153 		page_add_new_anon_rmap(pages[i], vmf->vma, haddr, false);
1154 		mem_cgroup_commit_charge(pages[i], memcg, false, false);
1155 		lru_cache_add_active_or_unevictable(pages[i], vma);
1156 		vmf->pte = pte_offset_map(&_pmd, haddr);
1157 		VM_BUG_ON(!pte_none(*vmf->pte));
1158 		set_pte_at(vma->vm_mm, haddr, vmf->pte, entry);
1159 		pte_unmap(vmf->pte);
1160 	}
1161 	kfree(pages);
1162 
1163 	smp_wmb(); /* make pte visible before pmd */
1164 	pmd_populate(vma->vm_mm, vmf->pmd, pgtable);
1165 	page_remove_rmap(page, true);
1166 	spin_unlock(vmf->ptl);
1167 
1168 	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
1169 
1170 	ret |= VM_FAULT_WRITE;
1171 	put_page(page);
1172 
1173 out:
1174 	return ret;
1175 
1176 out_free_pages:
1177 	spin_unlock(vmf->ptl);
1178 	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
1179 	for (i = 0; i < HPAGE_PMD_NR; i++) {
1180 		memcg = (void *)page_private(pages[i]);
1181 		set_page_private(pages[i], 0);
1182 		mem_cgroup_cancel_charge(pages[i], memcg, false);
1183 		put_page(pages[i]);
1184 	}
1185 	kfree(pages);
1186 	goto out;
1187 }
1188 
1189 int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
1190 {
1191 	struct vm_area_struct *vma = vmf->vma;
1192 	struct page *page = NULL, *new_page;
1193 	struct mem_cgroup *memcg;
1194 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1195 	unsigned long mmun_start;	/* For mmu_notifiers */
1196 	unsigned long mmun_end;		/* For mmu_notifiers */
1197 	gfp_t huge_gfp;			/* for allocation and charge */
1198 	int ret = 0;
1199 
1200 	vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
1201 	VM_BUG_ON_VMA(!vma->anon_vma, vma);
1202 	if (is_huge_zero_pmd(orig_pmd))
1203 		goto alloc;
1204 	spin_lock(vmf->ptl);
1205 	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
1206 		goto out_unlock;
1207 
1208 	page = pmd_page(orig_pmd);
1209 	VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
1210 	/*
1211 	 * We can only reuse the page if nobody else maps the huge page or it's
1212 	 * part.
1213 	 */
1214 	if (page_trans_huge_mapcount(page, NULL) == 1) {
1215 		pmd_t entry;
1216 		entry = pmd_mkyoung(orig_pmd);
1217 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1218 		if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry,  1))
1219 			update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1220 		ret |= VM_FAULT_WRITE;
1221 		goto out_unlock;
1222 	}
1223 	get_page(page);
1224 	spin_unlock(vmf->ptl);
1225 alloc:
1226 	if (transparent_hugepage_enabled(vma) &&
1227 	    !transparent_hugepage_debug_cow()) {
1228 		huge_gfp = alloc_hugepage_direct_gfpmask(vma);
1229 		new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
1230 	} else
1231 		new_page = NULL;
1232 
1233 	if (likely(new_page)) {
1234 		prep_transhuge_page(new_page);
1235 	} else {
1236 		if (!page) {
1237 			split_huge_pmd(vma, vmf->pmd, vmf->address);
1238 			ret |= VM_FAULT_FALLBACK;
1239 		} else {
1240 			ret = do_huge_pmd_wp_page_fallback(vmf, orig_pmd, page);
1241 			if (ret & VM_FAULT_OOM) {
1242 				split_huge_pmd(vma, vmf->pmd, vmf->address);
1243 				ret |= VM_FAULT_FALLBACK;
1244 			}
1245 			put_page(page);
1246 		}
1247 		count_vm_event(THP_FAULT_FALLBACK);
1248 		goto out;
1249 	}
1250 
1251 	if (unlikely(mem_cgroup_try_charge(new_page, vma->vm_mm,
1252 					huge_gfp, &memcg, true))) {
1253 		put_page(new_page);
1254 		split_huge_pmd(vma, vmf->pmd, vmf->address);
1255 		if (page)
1256 			put_page(page);
1257 		ret |= VM_FAULT_FALLBACK;
1258 		count_vm_event(THP_FAULT_FALLBACK);
1259 		goto out;
1260 	}
1261 
1262 	count_vm_event(THP_FAULT_ALLOC);
1263 
1264 	if (!page)
1265 		clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
1266 	else
1267 		copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
1268 	__SetPageUptodate(new_page);
1269 
1270 	mmun_start = haddr;
1271 	mmun_end   = haddr + HPAGE_PMD_SIZE;
1272 	mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);
1273 
1274 	spin_lock(vmf->ptl);
1275 	if (page)
1276 		put_page(page);
1277 	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
1278 		spin_unlock(vmf->ptl);
1279 		mem_cgroup_cancel_charge(new_page, memcg, true);
1280 		put_page(new_page);
1281 		goto out_mn;
1282 	} else {
1283 		pmd_t entry;
1284 		entry = mk_huge_pmd(new_page, vma->vm_page_prot);
1285 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1286 		pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
1287 		page_add_new_anon_rmap(new_page, vma, haddr, true);
1288 		mem_cgroup_commit_charge(new_page, memcg, false, true);
1289 		lru_cache_add_active_or_unevictable(new_page, vma);
1290 		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
1291 		update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1292 		if (!page) {
1293 			add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
1294 		} else {
1295 			VM_BUG_ON_PAGE(!PageHead(page), page);
1296 			page_remove_rmap(page, true);
1297 			put_page(page);
1298 		}
1299 		ret |= VM_FAULT_WRITE;
1300 	}
1301 	spin_unlock(vmf->ptl);
1302 out_mn:
1303 	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
1304 out:
1305 	return ret;
1306 out_unlock:
1307 	spin_unlock(vmf->ptl);
1308 	return ret;
1309 }
1310 
1311 /*
1312  * FOLL_FORCE can write to even unwritable pmd's, but only
1313  * after we've gone through a COW cycle and they are dirty.
1314  */
1315 static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
1316 {
1317 	return pmd_write(pmd) ||
1318 	       ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
1319 }
1320 
1321 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
1322 				   unsigned long addr,
1323 				   pmd_t *pmd,
1324 				   unsigned int flags)
1325 {
1326 	struct mm_struct *mm = vma->vm_mm;
1327 	struct page *page = NULL;
1328 
1329 	assert_spin_locked(pmd_lockptr(mm, pmd));
1330 
1331 	if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
1332 		goto out;
1333 
1334 	/* Avoid dumping huge zero page */
1335 	if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
1336 		return ERR_PTR(-EFAULT);
1337 
1338 	/* Full NUMA hinting faults to serialise migration in fault paths */
1339 	if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
1340 		goto out;
1341 
1342 	page = pmd_page(*pmd);
1343 	VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
1344 	if (flags & FOLL_TOUCH)
1345 		touch_pmd(vma, addr, pmd);
1346 	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
1347 		/*
1348 		 * We don't mlock() pte-mapped THPs. This way we can avoid
1349 		 * leaking mlocked pages into non-VM_LOCKED VMAs.
1350 		 *
1351 		 * For anon THP:
1352 		 *
1353 		 * In most cases the pmd is the only mapping of the page as we
1354 		 * break COW for the mlock() -- see gup_flags |= FOLL_WRITE for
1355 		 * writable private mappings in populate_vma_page_range().
1356 		 *
1357 		 * The only scenario when we have the page shared here is if we
1358 		 * mlocking read-only mapping shared over fork(). We skip
1359 		 * mlocking such pages.
1360 		 *
1361 		 * For file THP:
1362 		 *
1363 		 * We can expect PageDoubleMap() to be stable under page lock:
1364 		 * for file pages we set it in page_add_file_rmap(), which
1365 		 * requires page to be locked.
1366 		 */
1367 
1368 		if (PageAnon(page) && compound_mapcount(page) != 1)
1369 			goto skip_mlock;
1370 		if (PageDoubleMap(page) || !page->mapping)
1371 			goto skip_mlock;
1372 		if (!trylock_page(page))
1373 			goto skip_mlock;
1374 		lru_add_drain();
1375 		if (page->mapping && !PageDoubleMap(page))
1376 			mlock_vma_page(page);
1377 		unlock_page(page);
1378 	}
1379 skip_mlock:
1380 	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
1381 	VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
1382 	if (flags & FOLL_GET)
1383 		get_page(page);
1384 
1385 out:
1386 	return page;
1387 }
1388 
1389 /* NUMA hinting page fault entry point for trans huge pmds */
1390 int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
1391 {
1392 	struct vm_area_struct *vma = vmf->vma;
1393 	struct anon_vma *anon_vma = NULL;
1394 	struct page *page;
1395 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1396 	int page_nid = -1, this_nid = numa_node_id();
1397 	int target_nid, last_cpupid = -1;
1398 	bool page_locked;
1399 	bool migrated = false;
1400 	bool was_writable;
1401 	int flags = 0;
1402 
1403 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1404 	if (unlikely(!pmd_same(pmd, *vmf->pmd)))
1405 		goto out_unlock;
1406 
1407 	/*
1408 	 * If there are potential migrations, wait for completion and retry
1409 	 * without disrupting NUMA hinting information. Do not relock and
1410 	 * check_same as the page may no longer be mapped.
1411 	 */
1412 	if (unlikely(pmd_trans_migrating(*vmf->pmd))) {
1413 		page = pmd_page(*vmf->pmd);
1414 		spin_unlock(vmf->ptl);
1415 		wait_on_page_locked(page);
1416 		goto out;
1417 	}
1418 
1419 	page = pmd_page(pmd);
1420 	BUG_ON(is_huge_zero_page(page));
1421 	page_nid = page_to_nid(page);
1422 	last_cpupid = page_cpupid_last(page);
1423 	count_vm_numa_event(NUMA_HINT_FAULTS);
1424 	if (page_nid == this_nid) {
1425 		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
1426 		flags |= TNF_FAULT_LOCAL;
1427 	}
1428 
1429 	/* See similar comment in do_numa_page for explanation */
1430 	if (!pmd_savedwrite(pmd))
1431 		flags |= TNF_NO_GROUP;
1432 
1433 	/*
1434 	 * Acquire the page lock to serialise THP migrations but avoid dropping
1435 	 * page_table_lock if at all possible
1436 	 */
1437 	page_locked = trylock_page(page);
1438 	target_nid = mpol_misplaced(page, vma, haddr);
1439 	if (target_nid == -1) {
1440 		/* If the page was locked, there are no parallel migrations */
1441 		if (page_locked)
1442 			goto clear_pmdnuma;
1443 	}
1444 
1445 	/* Migration could have started since the pmd_trans_migrating check */
1446 	if (!page_locked) {
1447 		spin_unlock(vmf->ptl);
1448 		wait_on_page_locked(page);
1449 		page_nid = -1;
1450 		goto out;
1451 	}
1452 
1453 	/*
1454 	 * Page is misplaced. Page lock serialises migrations. Acquire anon_vma
1455 	 * to serialises splits
1456 	 */
1457 	get_page(page);
1458 	spin_unlock(vmf->ptl);
1459 	anon_vma = page_lock_anon_vma_read(page);
1460 
1461 	/* Confirm the PMD did not change while page_table_lock was released */
1462 	spin_lock(vmf->ptl);
1463 	if (unlikely(!pmd_same(pmd, *vmf->pmd))) {
1464 		unlock_page(page);
1465 		put_page(page);
1466 		page_nid = -1;
1467 		goto out_unlock;
1468 	}
1469 
1470 	/* Bail if we fail to protect against THP splits for any reason */
1471 	if (unlikely(!anon_vma)) {
1472 		put_page(page);
1473 		page_nid = -1;
1474 		goto clear_pmdnuma;
1475 	}
1476 
1477 	/*
1478 	 * Migrate the THP to the requested node, returns with page unlocked
1479 	 * and access rights restored.
1480 	 */
1481 	spin_unlock(vmf->ptl);
1482 	migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma,
1483 				vmf->pmd, pmd, vmf->address, page, target_nid);
1484 	if (migrated) {
1485 		flags |= TNF_MIGRATED;
1486 		page_nid = target_nid;
1487 	} else
1488 		flags |= TNF_MIGRATE_FAIL;
1489 
1490 	goto out;
1491 clear_pmdnuma:
1492 	BUG_ON(!PageLocked(page));
1493 	was_writable = pmd_savedwrite(pmd);
1494 	pmd = pmd_modify(pmd, vma->vm_page_prot);
1495 	pmd = pmd_mkyoung(pmd);
1496 	if (was_writable)
1497 		pmd = pmd_mkwrite(pmd);
1498 	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
1499 	update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1500 	unlock_page(page);
1501 out_unlock:
1502 	spin_unlock(vmf->ptl);
1503 
1504 out:
1505 	if (anon_vma)
1506 		page_unlock_anon_vma_read(anon_vma);
1507 
1508 	if (page_nid != -1)
1509 		task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR,
1510 				flags);
1511 
1512 	return 0;
1513 }
1514 
1515 /*
1516  * Return true if we do MADV_FREE successfully on entire pmd page.
1517  * Otherwise, return false.
1518  */
1519 bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1520 		pmd_t *pmd, unsigned long addr, unsigned long next)
1521 {
1522 	spinlock_t *ptl;
1523 	pmd_t orig_pmd;
1524 	struct page *page;
1525 	struct mm_struct *mm = tlb->mm;
1526 	bool ret = false;
1527 
1528 	tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE);
1529 
1530 	ptl = pmd_trans_huge_lock(pmd, vma);
1531 	if (!ptl)
1532 		goto out_unlocked;
1533 
1534 	orig_pmd = *pmd;
1535 	if (is_huge_zero_pmd(orig_pmd))
1536 		goto out;
1537 
1538 	page = pmd_page(orig_pmd);
1539 	/*
1540 	 * If other processes are mapping this page, we couldn't discard
1541 	 * the page unless they all do MADV_FREE so let's skip the page.
1542 	 */
1543 	if (page_mapcount(page) != 1)
1544 		goto out;
1545 
1546 	if (!trylock_page(page))
1547 		goto out;
1548 
1549 	/*
1550 	 * If user want to discard part-pages of THP, split it so MADV_FREE
1551 	 * will deactivate only them.
1552 	 */
1553 	if (next - addr != HPAGE_PMD_SIZE) {
1554 		get_page(page);
1555 		spin_unlock(ptl);
1556 		split_huge_page(page);
1557 		put_page(page);
1558 		unlock_page(page);
1559 		goto out_unlocked;
1560 	}
1561 
1562 	if (PageDirty(page))
1563 		ClearPageDirty(page);
1564 	unlock_page(page);
1565 
1566 	if (PageActive(page))
1567 		deactivate_page(page);
1568 
1569 	if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
1570 		orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
1571 			tlb->fullmm);
1572 		orig_pmd = pmd_mkold(orig_pmd);
1573 		orig_pmd = pmd_mkclean(orig_pmd);
1574 
1575 		set_pmd_at(mm, addr, pmd, orig_pmd);
1576 		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1577 	}
1578 	ret = true;
1579 out:
1580 	spin_unlock(ptl);
1581 out_unlocked:
1582 	return ret;
1583 }
1584 
1585 static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
1586 {
1587 	pgtable_t pgtable;
1588 
1589 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
1590 	pte_free(mm, pgtable);
1591 	atomic_long_dec(&mm->nr_ptes);
1592 }
1593 
1594 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1595 		 pmd_t *pmd, unsigned long addr)
1596 {
1597 	pmd_t orig_pmd;
1598 	spinlock_t *ptl;
1599 
1600 	tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE);
1601 
1602 	ptl = __pmd_trans_huge_lock(pmd, vma);
1603 	if (!ptl)
1604 		return 0;
1605 	/*
1606 	 * For architectures like ppc64 we look at deposited pgtable
1607 	 * when calling pmdp_huge_get_and_clear. So do the
1608 	 * pgtable_trans_huge_withdraw after finishing pmdp related
1609 	 * operations.
1610 	 */
1611 	orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
1612 			tlb->fullmm);
1613 	tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1614 	if (vma_is_dax(vma)) {
1615 		spin_unlock(ptl);
1616 		if (is_huge_zero_pmd(orig_pmd))
1617 			tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
1618 	} else if (is_huge_zero_pmd(orig_pmd)) {
1619 		pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
1620 		atomic_long_dec(&tlb->mm->nr_ptes);
1621 		spin_unlock(ptl);
1622 		tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
1623 	} else {
1624 		struct page *page = pmd_page(orig_pmd);
1625 		page_remove_rmap(page, true);
1626 		VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
1627 		VM_BUG_ON_PAGE(!PageHead(page), page);
1628 		if (PageAnon(page)) {
1629 			pgtable_t pgtable;
1630 			pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd);
1631 			pte_free(tlb->mm, pgtable);
1632 			atomic_long_dec(&tlb->mm->nr_ptes);
1633 			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
1634 		} else {
1635 			if (arch_needs_pgtable_deposit())
1636 				zap_deposited_table(tlb->mm, pmd);
1637 			add_mm_counter(tlb->mm, MM_FILEPAGES, -HPAGE_PMD_NR);
1638 		}
1639 		spin_unlock(ptl);
1640 		tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
1641 	}
1642 	return 1;
1643 }
1644 
1645 #ifndef pmd_move_must_withdraw
1646 static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
1647 					 spinlock_t *old_pmd_ptl,
1648 					 struct vm_area_struct *vma)
1649 {
1650 	/*
1651 	 * With split pmd lock we also need to move preallocated
1652 	 * PTE page table if new_pmd is on different PMD page table.
1653 	 *
1654 	 * We also don't deposit and withdraw tables for file pages.
1655 	 */
1656 	return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
1657 }
1658 #endif
1659 
1660 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
1661 		  unsigned long new_addr, unsigned long old_end,
1662 		  pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush)
1663 {
1664 	spinlock_t *old_ptl, *new_ptl;
1665 	pmd_t pmd;
1666 	struct mm_struct *mm = vma->vm_mm;
1667 	bool force_flush = false;
1668 
1669 	if ((old_addr & ~HPAGE_PMD_MASK) ||
1670 	    (new_addr & ~HPAGE_PMD_MASK) ||
1671 	    old_end - old_addr < HPAGE_PMD_SIZE)
1672 		return false;
1673 
1674 	/*
1675 	 * The destination pmd shouldn't be established, free_pgtables()
1676 	 * should have release it.
1677 	 */
1678 	if (WARN_ON(!pmd_none(*new_pmd))) {
1679 		VM_BUG_ON(pmd_trans_huge(*new_pmd));
1680 		return false;
1681 	}
1682 
1683 	/*
1684 	 * We don't have to worry about the ordering of src and dst
1685 	 * ptlocks because exclusive mmap_sem prevents deadlock.
1686 	 */
1687 	old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
1688 	if (old_ptl) {
1689 		new_ptl = pmd_lockptr(mm, new_pmd);
1690 		if (new_ptl != old_ptl)
1691 			spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
1692 		pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
1693 		if (pmd_present(pmd) && pmd_dirty(pmd))
1694 			force_flush = true;
1695 		VM_BUG_ON(!pmd_none(*new_pmd));
1696 
1697 		if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
1698 			pgtable_t pgtable;
1699 			pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
1700 			pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
1701 		}
1702 		set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
1703 		if (new_ptl != old_ptl)
1704 			spin_unlock(new_ptl);
1705 		if (force_flush)
1706 			flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
1707 		else
1708 			*need_flush = true;
1709 		spin_unlock(old_ptl);
1710 		return true;
1711 	}
1712 	return false;
1713 }
1714 
1715 /*
1716  * Returns
1717  *  - 0 if PMD could not be locked
1718  *  - 1 if PMD was locked but protections unchange and TLB flush unnecessary
1719  *  - HPAGE_PMD_NR is protections changed and TLB flush necessary
1720  */
1721 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1722 		unsigned long addr, pgprot_t newprot, int prot_numa)
1723 {
1724 	struct mm_struct *mm = vma->vm_mm;
1725 	spinlock_t *ptl;
1726 	int ret = 0;
1727 
1728 	ptl = __pmd_trans_huge_lock(pmd, vma);
1729 	if (ptl) {
1730 		pmd_t entry;
1731 		bool preserve_write = prot_numa && pmd_write(*pmd);
1732 		ret = 1;
1733 
1734 		/*
1735 		 * Avoid trapping faults against the zero page. The read-only
1736 		 * data is likely to be read-cached on the local CPU and
1737 		 * local/remote hits to the zero page are not interesting.
1738 		 */
1739 		if (prot_numa && is_huge_zero_pmd(*pmd)) {
1740 			spin_unlock(ptl);
1741 			return ret;
1742 		}
1743 
1744 		if (!prot_numa || !pmd_protnone(*pmd)) {
1745 			entry = pmdp_huge_get_and_clear_notify(mm, addr, pmd);
1746 			entry = pmd_modify(entry, newprot);
1747 			if (preserve_write)
1748 				entry = pmd_mk_savedwrite(entry);
1749 			ret = HPAGE_PMD_NR;
1750 			set_pmd_at(mm, addr, pmd, entry);
1751 			BUG_ON(vma_is_anonymous(vma) && !preserve_write &&
1752 					pmd_write(entry));
1753 		}
1754 		spin_unlock(ptl);
1755 	}
1756 
1757 	return ret;
1758 }
1759 
1760 /*
1761  * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
1762  *
1763  * Note that if it returns page table lock pointer, this routine returns without
1764  * unlocking page table lock. So callers must unlock it.
1765  */
1766 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
1767 {
1768 	spinlock_t *ptl;
1769 	ptl = pmd_lock(vma->vm_mm, pmd);
1770 	if (likely(pmd_trans_huge(*pmd) || pmd_devmap(*pmd)))
1771 		return ptl;
1772 	spin_unlock(ptl);
1773 	return NULL;
1774 }
1775 
1776 /*
1777  * Returns true if a given pud maps a thp, false otherwise.
1778  *
1779  * Note that if it returns true, this routine returns without unlocking page
1780  * table lock. So callers must unlock it.
1781  */
1782 spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
1783 {
1784 	spinlock_t *ptl;
1785 
1786 	ptl = pud_lock(vma->vm_mm, pud);
1787 	if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
1788 		return ptl;
1789 	spin_unlock(ptl);
1790 	return NULL;
1791 }
1792 
1793 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1794 int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
1795 		 pud_t *pud, unsigned long addr)
1796 {
1797 	pud_t orig_pud;
1798 	spinlock_t *ptl;
1799 
1800 	ptl = __pud_trans_huge_lock(pud, vma);
1801 	if (!ptl)
1802 		return 0;
1803 	/*
1804 	 * For architectures like ppc64 we look at deposited pgtable
1805 	 * when calling pudp_huge_get_and_clear. So do the
1806 	 * pgtable_trans_huge_withdraw after finishing pudp related
1807 	 * operations.
1808 	 */
1809 	orig_pud = pudp_huge_get_and_clear_full(tlb->mm, addr, pud,
1810 			tlb->fullmm);
1811 	tlb_remove_pud_tlb_entry(tlb, pud, addr);
1812 	if (vma_is_dax(vma)) {
1813 		spin_unlock(ptl);
1814 		/* No zero page support yet */
1815 	} else {
1816 		/* No support for anonymous PUD pages yet */
1817 		BUG();
1818 	}
1819 	return 1;
1820 }
1821 
1822 static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
1823 		unsigned long haddr)
1824 {
1825 	VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
1826 	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
1827 	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
1828 	VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));
1829 
1830 	count_vm_event(THP_SPLIT_PMD);
1831 
1832 	pudp_huge_clear_flush_notify(vma, haddr, pud);
1833 }
1834 
1835 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
1836 		unsigned long address)
1837 {
1838 	spinlock_t *ptl;
1839 	struct mm_struct *mm = vma->vm_mm;
1840 	unsigned long haddr = address & HPAGE_PUD_MASK;
1841 
1842 	mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PUD_SIZE);
1843 	ptl = pud_lock(mm, pud);
1844 	if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
1845 		goto out;
1846 	__split_huge_pud_locked(vma, pud, haddr);
1847 
1848 out:
1849 	spin_unlock(ptl);
1850 	mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PUD_SIZE);
1851 }
1852 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1853 
1854 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
1855 		unsigned long haddr, pmd_t *pmd)
1856 {
1857 	struct mm_struct *mm = vma->vm_mm;
1858 	pgtable_t pgtable;
1859 	pmd_t _pmd;
1860 	int i;
1861 
1862 	/* leave pmd empty until pte is filled */
1863 	pmdp_huge_clear_flush_notify(vma, haddr, pmd);
1864 
1865 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
1866 	pmd_populate(mm, &_pmd, pgtable);
1867 
1868 	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
1869 		pte_t *pte, entry;
1870 		entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
1871 		entry = pte_mkspecial(entry);
1872 		pte = pte_offset_map(&_pmd, haddr);
1873 		VM_BUG_ON(!pte_none(*pte));
1874 		set_pte_at(mm, haddr, pte, entry);
1875 		pte_unmap(pte);
1876 	}
1877 	smp_wmb(); /* make pte visible before pmd */
1878 	pmd_populate(mm, pmd, pgtable);
1879 }
1880 
1881 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
1882 		unsigned long haddr, bool freeze)
1883 {
1884 	struct mm_struct *mm = vma->vm_mm;
1885 	struct page *page;
1886 	pgtable_t pgtable;
1887 	pmd_t _pmd;
1888 	bool young, write, dirty, soft_dirty;
1889 	unsigned long addr;
1890 	int i;
1891 
1892 	VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
1893 	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
1894 	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
1895 	VM_BUG_ON(!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd));
1896 
1897 	count_vm_event(THP_SPLIT_PMD);
1898 
1899 	if (!vma_is_anonymous(vma)) {
1900 		_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
1901 		/*
1902 		 * We are going to unmap this huge page. So
1903 		 * just go ahead and zap it
1904 		 */
1905 		if (arch_needs_pgtable_deposit())
1906 			zap_deposited_table(mm, pmd);
1907 		if (vma_is_dax(vma))
1908 			return;
1909 		page = pmd_page(_pmd);
1910 		if (!PageReferenced(page) && pmd_young(_pmd))
1911 			SetPageReferenced(page);
1912 		page_remove_rmap(page, true);
1913 		put_page(page);
1914 		add_mm_counter(mm, MM_FILEPAGES, -HPAGE_PMD_NR);
1915 		return;
1916 	} else if (is_huge_zero_pmd(*pmd)) {
1917 		return __split_huge_zero_page_pmd(vma, haddr, pmd);
1918 	}
1919 
1920 	page = pmd_page(*pmd);
1921 	VM_BUG_ON_PAGE(!page_count(page), page);
1922 	page_ref_add(page, HPAGE_PMD_NR - 1);
1923 	write = pmd_write(*pmd);
1924 	young = pmd_young(*pmd);
1925 	dirty = pmd_dirty(*pmd);
1926 	soft_dirty = pmd_soft_dirty(*pmd);
1927 
1928 	pmdp_huge_split_prepare(vma, haddr, pmd);
1929 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
1930 	pmd_populate(mm, &_pmd, pgtable);
1931 
1932 	for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
1933 		pte_t entry, *pte;
1934 		/*
1935 		 * Note that NUMA hinting access restrictions are not
1936 		 * transferred to avoid any possibility of altering
1937 		 * permissions across VMAs.
1938 		 */
1939 		if (freeze) {
1940 			swp_entry_t swp_entry;
1941 			swp_entry = make_migration_entry(page + i, write);
1942 			entry = swp_entry_to_pte(swp_entry);
1943 			if (soft_dirty)
1944 				entry = pte_swp_mksoft_dirty(entry);
1945 		} else {
1946 			entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
1947 			entry = maybe_mkwrite(entry, vma);
1948 			if (!write)
1949 				entry = pte_wrprotect(entry);
1950 			if (!young)
1951 				entry = pte_mkold(entry);
1952 			if (soft_dirty)
1953 				entry = pte_mksoft_dirty(entry);
1954 		}
1955 		if (dirty)
1956 			SetPageDirty(page + i);
1957 		pte = pte_offset_map(&_pmd, addr);
1958 		BUG_ON(!pte_none(*pte));
1959 		set_pte_at(mm, addr, pte, entry);
1960 		atomic_inc(&page[i]._mapcount);
1961 		pte_unmap(pte);
1962 	}
1963 
1964 	/*
1965 	 * Set PG_double_map before dropping compound_mapcount to avoid
1966 	 * false-negative page_mapped().
1967 	 */
1968 	if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) {
1969 		for (i = 0; i < HPAGE_PMD_NR; i++)
1970 			atomic_inc(&page[i]._mapcount);
1971 	}
1972 
1973 	if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
1974 		/* Last compound_mapcount is gone. */
1975 		__dec_node_page_state(page, NR_ANON_THPS);
1976 		if (TestClearPageDoubleMap(page)) {
1977 			/* No need in mapcount reference anymore */
1978 			for (i = 0; i < HPAGE_PMD_NR; i++)
1979 				atomic_dec(&page[i]._mapcount);
1980 		}
1981 	}
1982 
1983 	smp_wmb(); /* make pte visible before pmd */
1984 	/*
1985 	 * Up to this point the pmd is present and huge and userland has the
1986 	 * whole access to the hugepage during the split (which happens in
1987 	 * place). If we overwrite the pmd with the not-huge version pointing
1988 	 * to the pte here (which of course we could if all CPUs were bug
1989 	 * free), userland could trigger a small page size TLB miss on the
1990 	 * small sized TLB while the hugepage TLB entry is still established in
1991 	 * the huge TLB. Some CPU doesn't like that.
1992 	 * See http://support.amd.com/us/Processor_TechDocs/41322.pdf, Erratum
1993 	 * 383 on page 93. Intel should be safe but is also warns that it's
1994 	 * only safe if the permission and cache attributes of the two entries
1995 	 * loaded in the two TLB is identical (which should be the case here).
1996 	 * But it is generally safer to never allow small and huge TLB entries
1997 	 * for the same virtual address to be loaded simultaneously. So instead
1998 	 * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
1999 	 * current pmd notpresent (atomically because here the pmd_trans_huge
2000 	 * and pmd_trans_splitting must remain set at all times on the pmd
2001 	 * until the split is complete for this pmd), then we flush the SMP TLB
2002 	 * and finally we write the non-huge version of the pmd entry with
2003 	 * pmd_populate.
2004 	 */
2005 	pmdp_invalidate(vma, haddr, pmd);
2006 	pmd_populate(mm, pmd, pgtable);
2007 
2008 	if (freeze) {
2009 		for (i = 0; i < HPAGE_PMD_NR; i++) {
2010 			page_remove_rmap(page + i, false);
2011 			put_page(page + i);
2012 		}
2013 	}
2014 }
2015 
2016 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
2017 		unsigned long address, bool freeze, struct page *page)
2018 {
2019 	spinlock_t *ptl;
2020 	struct mm_struct *mm = vma->vm_mm;
2021 	unsigned long haddr = address & HPAGE_PMD_MASK;
2022 
2023 	mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
2024 	ptl = pmd_lock(mm, pmd);
2025 
2026 	/*
2027 	 * If caller asks to setup a migration entries, we need a page to check
2028 	 * pmd against. Otherwise we can end up replacing wrong page.
2029 	 */
2030 	VM_BUG_ON(freeze && !page);
2031 	if (page && page != pmd_page(*pmd))
2032 	        goto out;
2033 
2034 	if (pmd_trans_huge(*pmd)) {
2035 		page = pmd_page(*pmd);
2036 		if (PageMlocked(page))
2037 			clear_page_mlock(page);
2038 	} else if (!pmd_devmap(*pmd))
2039 		goto out;
2040 	__split_huge_pmd_locked(vma, pmd, haddr, freeze);
2041 out:
2042 	spin_unlock(ptl);
2043 	mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PMD_SIZE);
2044 }
2045 
2046 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
2047 		bool freeze, struct page *page)
2048 {
2049 	pgd_t *pgd;
2050 	pud_t *pud;
2051 	pmd_t *pmd;
2052 
2053 	pgd = pgd_offset(vma->vm_mm, address);
2054 	if (!pgd_present(*pgd))
2055 		return;
2056 
2057 	pud = pud_offset(pgd, address);
2058 	if (!pud_present(*pud))
2059 		return;
2060 
2061 	pmd = pmd_offset(pud, address);
2062 
2063 	__split_huge_pmd(vma, pmd, address, freeze, page);
2064 }
2065 
2066 void vma_adjust_trans_huge(struct vm_area_struct *vma,
2067 			     unsigned long start,
2068 			     unsigned long end,
2069 			     long adjust_next)
2070 {
2071 	/*
2072 	 * If the new start address isn't hpage aligned and it could
2073 	 * previously contain an hugepage: check if we need to split
2074 	 * an huge pmd.
2075 	 */
2076 	if (start & ~HPAGE_PMD_MASK &&
2077 	    (start & HPAGE_PMD_MASK) >= vma->vm_start &&
2078 	    (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
2079 		split_huge_pmd_address(vma, start, false, NULL);
2080 
2081 	/*
2082 	 * If the new end address isn't hpage aligned and it could
2083 	 * previously contain an hugepage: check if we need to split
2084 	 * an huge pmd.
2085 	 */
2086 	if (end & ~HPAGE_PMD_MASK &&
2087 	    (end & HPAGE_PMD_MASK) >= vma->vm_start &&
2088 	    (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
2089 		split_huge_pmd_address(vma, end, false, NULL);
2090 
2091 	/*
2092 	 * If we're also updating the vma->vm_next->vm_start, if the new
2093 	 * vm_next->vm_start isn't page aligned and it could previously
2094 	 * contain an hugepage: check if we need to split an huge pmd.
2095 	 */
2096 	if (adjust_next > 0) {
2097 		struct vm_area_struct *next = vma->vm_next;
2098 		unsigned long nstart = next->vm_start;
2099 		nstart += adjust_next << PAGE_SHIFT;
2100 		if (nstart & ~HPAGE_PMD_MASK &&
2101 		    (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
2102 		    (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
2103 			split_huge_pmd_address(next, nstart, false, NULL);
2104 	}
2105 }
2106 
2107 static void freeze_page(struct page *page)
2108 {
2109 	enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS |
2110 		TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
2111 	int ret;
2112 
2113 	VM_BUG_ON_PAGE(!PageHead(page), page);
2114 
2115 	if (PageAnon(page))
2116 		ttu_flags |= TTU_MIGRATION;
2117 
2118 	ret = try_to_unmap(page, ttu_flags);
2119 	VM_BUG_ON_PAGE(ret, page);
2120 }
2121 
2122 static void unfreeze_page(struct page *page)
2123 {
2124 	int i;
2125 	if (PageTransHuge(page)) {
2126 		remove_migration_ptes(page, page, true);
2127 	} else {
2128 		for (i = 0; i < HPAGE_PMD_NR; i++)
2129 			remove_migration_ptes(page + i, page + i, true);
2130 	}
2131 }
2132 
2133 static void __split_huge_page_tail(struct page *head, int tail,
2134 		struct lruvec *lruvec, struct list_head *list)
2135 {
2136 	struct page *page_tail = head + tail;
2137 
2138 	VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
2139 	VM_BUG_ON_PAGE(page_ref_count(page_tail) != 0, page_tail);
2140 
2141 	/*
2142 	 * tail_page->_refcount is zero and not changing from under us. But
2143 	 * get_page_unless_zero() may be running from under us on the
2144 	 * tail_page. If we used atomic_set() below instead of atomic_inc() or
2145 	 * atomic_add(), we would then run atomic_set() concurrently with
2146 	 * get_page_unless_zero(), and atomic_set() is implemented in C not
2147 	 * using locked ops. spin_unlock on x86 sometime uses locked ops
2148 	 * because of PPro errata 66, 92, so unless somebody can guarantee
2149 	 * atomic_set() here would be safe on all archs (and not only on x86),
2150 	 * it's safer to use atomic_inc()/atomic_add().
2151 	 */
2152 	if (PageAnon(head)) {
2153 		page_ref_inc(page_tail);
2154 	} else {
2155 		/* Additional pin to radix tree */
2156 		page_ref_add(page_tail, 2);
2157 	}
2158 
2159 	page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
2160 	page_tail->flags |= (head->flags &
2161 			((1L << PG_referenced) |
2162 			 (1L << PG_swapbacked) |
2163 			 (1L << PG_mlocked) |
2164 			 (1L << PG_uptodate) |
2165 			 (1L << PG_active) |
2166 			 (1L << PG_locked) |
2167 			 (1L << PG_unevictable) |
2168 			 (1L << PG_dirty)));
2169 
2170 	/*
2171 	 * After clearing PageTail the gup refcount can be released.
2172 	 * Page flags also must be visible before we make the page non-compound.
2173 	 */
2174 	smp_wmb();
2175 
2176 	clear_compound_head(page_tail);
2177 
2178 	if (page_is_young(head))
2179 		set_page_young(page_tail);
2180 	if (page_is_idle(head))
2181 		set_page_idle(page_tail);
2182 
2183 	/* ->mapping in first tail page is compound_mapcount */
2184 	VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
2185 			page_tail);
2186 	page_tail->mapping = head->mapping;
2187 
2188 	page_tail->index = head->index + tail;
2189 	page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
2190 	lru_add_page_tail(head, page_tail, lruvec, list);
2191 }
2192 
2193 static void __split_huge_page(struct page *page, struct list_head *list,
2194 		unsigned long flags)
2195 {
2196 	struct page *head = compound_head(page);
2197 	struct zone *zone = page_zone(head);
2198 	struct lruvec *lruvec;
2199 	pgoff_t end = -1;
2200 	int i;
2201 
2202 	lruvec = mem_cgroup_page_lruvec(head, zone->zone_pgdat);
2203 
2204 	/* complete memcg works before add pages to LRU */
2205 	mem_cgroup_split_huge_fixup(head);
2206 
2207 	if (!PageAnon(page))
2208 		end = DIV_ROUND_UP(i_size_read(head->mapping->host), PAGE_SIZE);
2209 
2210 	for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
2211 		__split_huge_page_tail(head, i, lruvec, list);
2212 		/* Some pages can be beyond i_size: drop them from page cache */
2213 		if (head[i].index >= end) {
2214 			__ClearPageDirty(head + i);
2215 			__delete_from_page_cache(head + i, NULL);
2216 			if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head))
2217 				shmem_uncharge(head->mapping->host, 1);
2218 			put_page(head + i);
2219 		}
2220 	}
2221 
2222 	ClearPageCompound(head);
2223 	/* See comment in __split_huge_page_tail() */
2224 	if (PageAnon(head)) {
2225 		page_ref_inc(head);
2226 	} else {
2227 		/* Additional pin to radix tree */
2228 		page_ref_add(head, 2);
2229 		spin_unlock(&head->mapping->tree_lock);
2230 	}
2231 
2232 	spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
2233 
2234 	unfreeze_page(head);
2235 
2236 	for (i = 0; i < HPAGE_PMD_NR; i++) {
2237 		struct page *subpage = head + i;
2238 		if (subpage == page)
2239 			continue;
2240 		unlock_page(subpage);
2241 
2242 		/*
2243 		 * Subpages may be freed if there wasn't any mapping
2244 		 * like if add_to_swap() is running on a lru page that
2245 		 * had its mapping zapped. And freeing these pages
2246 		 * requires taking the lru_lock so we do the put_page
2247 		 * of the tail pages after the split is complete.
2248 		 */
2249 		put_page(subpage);
2250 	}
2251 }
2252 
2253 int total_mapcount(struct page *page)
2254 {
2255 	int i, compound, ret;
2256 
2257 	VM_BUG_ON_PAGE(PageTail(page), page);
2258 
2259 	if (likely(!PageCompound(page)))
2260 		return atomic_read(&page->_mapcount) + 1;
2261 
2262 	compound = compound_mapcount(page);
2263 	if (PageHuge(page))
2264 		return compound;
2265 	ret = compound;
2266 	for (i = 0; i < HPAGE_PMD_NR; i++)
2267 		ret += atomic_read(&page[i]._mapcount) + 1;
2268 	/* File pages has compound_mapcount included in _mapcount */
2269 	if (!PageAnon(page))
2270 		return ret - compound * HPAGE_PMD_NR;
2271 	if (PageDoubleMap(page))
2272 		ret -= HPAGE_PMD_NR;
2273 	return ret;
2274 }
2275 
2276 /*
2277  * This calculates accurately how many mappings a transparent hugepage
2278  * has (unlike page_mapcount() which isn't fully accurate). This full
2279  * accuracy is primarily needed to know if copy-on-write faults can
2280  * reuse the page and change the mapping to read-write instead of
2281  * copying them. At the same time this returns the total_mapcount too.
2282  *
2283  * The function returns the highest mapcount any one of the subpages
2284  * has. If the return value is one, even if different processes are
2285  * mapping different subpages of the transparent hugepage, they can
2286  * all reuse it, because each process is reusing a different subpage.
2287  *
2288  * The total_mapcount is instead counting all virtual mappings of the
2289  * subpages. If the total_mapcount is equal to "one", it tells the
2290  * caller all mappings belong to the same "mm" and in turn the
2291  * anon_vma of the transparent hugepage can become the vma->anon_vma
2292  * local one as no other process may be mapping any of the subpages.
2293  *
2294  * It would be more accurate to replace page_mapcount() with
2295  * page_trans_huge_mapcount(), however we only use
2296  * page_trans_huge_mapcount() in the copy-on-write faults where we
2297  * need full accuracy to avoid breaking page pinning, because
2298  * page_trans_huge_mapcount() is slower than page_mapcount().
2299  */
2300 int page_trans_huge_mapcount(struct page *page, int *total_mapcount)
2301 {
2302 	int i, ret, _total_mapcount, mapcount;
2303 
2304 	/* hugetlbfs shouldn't call it */
2305 	VM_BUG_ON_PAGE(PageHuge(page), page);
2306 
2307 	if (likely(!PageTransCompound(page))) {
2308 		mapcount = atomic_read(&page->_mapcount) + 1;
2309 		if (total_mapcount)
2310 			*total_mapcount = mapcount;
2311 		return mapcount;
2312 	}
2313 
2314 	page = compound_head(page);
2315 
2316 	_total_mapcount = ret = 0;
2317 	for (i = 0; i < HPAGE_PMD_NR; i++) {
2318 		mapcount = atomic_read(&page[i]._mapcount) + 1;
2319 		ret = max(ret, mapcount);
2320 		_total_mapcount += mapcount;
2321 	}
2322 	if (PageDoubleMap(page)) {
2323 		ret -= 1;
2324 		_total_mapcount -= HPAGE_PMD_NR;
2325 	}
2326 	mapcount = compound_mapcount(page);
2327 	ret += mapcount;
2328 	_total_mapcount += mapcount;
2329 	if (total_mapcount)
2330 		*total_mapcount = _total_mapcount;
2331 	return ret;
2332 }
2333 
2334 /*
2335  * This function splits huge page into normal pages. @page can point to any
2336  * subpage of huge page to split. Split doesn't change the position of @page.
2337  *
2338  * Only caller must hold pin on the @page, otherwise split fails with -EBUSY.
2339  * The huge page must be locked.
2340  *
2341  * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
2342  *
2343  * Both head page and tail pages will inherit mapping, flags, and so on from
2344  * the hugepage.
2345  *
2346  * GUP pin and PG_locked transferred to @page. Rest subpages can be freed if
2347  * they are not mapped.
2348  *
2349  * Returns 0 if the hugepage is split successfully.
2350  * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under
2351  * us.
2352  */
2353 int split_huge_page_to_list(struct page *page, struct list_head *list)
2354 {
2355 	struct page *head = compound_head(page);
2356 	struct pglist_data *pgdata = NODE_DATA(page_to_nid(head));
2357 	struct anon_vma *anon_vma = NULL;
2358 	struct address_space *mapping = NULL;
2359 	int count, mapcount, extra_pins, ret;
2360 	bool mlocked;
2361 	unsigned long flags;
2362 
2363 	VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
2364 	VM_BUG_ON_PAGE(!PageLocked(page), page);
2365 	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
2366 	VM_BUG_ON_PAGE(!PageCompound(page), page);
2367 
2368 	if (PageAnon(head)) {
2369 		/*
2370 		 * The caller does not necessarily hold an mmap_sem that would
2371 		 * prevent the anon_vma disappearing so we first we take a
2372 		 * reference to it and then lock the anon_vma for write. This
2373 		 * is similar to page_lock_anon_vma_read except the write lock
2374 		 * is taken to serialise against parallel split or collapse
2375 		 * operations.
2376 		 */
2377 		anon_vma = page_get_anon_vma(head);
2378 		if (!anon_vma) {
2379 			ret = -EBUSY;
2380 			goto out;
2381 		}
2382 		extra_pins = 0;
2383 		mapping = NULL;
2384 		anon_vma_lock_write(anon_vma);
2385 	} else {
2386 		mapping = head->mapping;
2387 
2388 		/* Truncated ? */
2389 		if (!mapping) {
2390 			ret = -EBUSY;
2391 			goto out;
2392 		}
2393 
2394 		/* Addidional pins from radix tree */
2395 		extra_pins = HPAGE_PMD_NR;
2396 		anon_vma = NULL;
2397 		i_mmap_lock_read(mapping);
2398 	}
2399 
2400 	/*
2401 	 * Racy check if we can split the page, before freeze_page() will
2402 	 * split PMDs
2403 	 */
2404 	if (total_mapcount(head) != page_count(head) - extra_pins - 1) {
2405 		ret = -EBUSY;
2406 		goto out_unlock;
2407 	}
2408 
2409 	mlocked = PageMlocked(page);
2410 	freeze_page(head);
2411 	VM_BUG_ON_PAGE(compound_mapcount(head), head);
2412 
2413 	/* Make sure the page is not on per-CPU pagevec as it takes pin */
2414 	if (mlocked)
2415 		lru_add_drain();
2416 
2417 	/* prevent PageLRU to go away from under us, and freeze lru stats */
2418 	spin_lock_irqsave(zone_lru_lock(page_zone(head)), flags);
2419 
2420 	if (mapping) {
2421 		void **pslot;
2422 
2423 		spin_lock(&mapping->tree_lock);
2424 		pslot = radix_tree_lookup_slot(&mapping->page_tree,
2425 				page_index(head));
2426 		/*
2427 		 * Check if the head page is present in radix tree.
2428 		 * We assume all tail are present too, if head is there.
2429 		 */
2430 		if (radix_tree_deref_slot_protected(pslot,
2431 					&mapping->tree_lock) != head)
2432 			goto fail;
2433 	}
2434 
2435 	/* Prevent deferred_split_scan() touching ->_refcount */
2436 	spin_lock(&pgdata->split_queue_lock);
2437 	count = page_count(head);
2438 	mapcount = total_mapcount(head);
2439 	if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) {
2440 		if (!list_empty(page_deferred_list(head))) {
2441 			pgdata->split_queue_len--;
2442 			list_del(page_deferred_list(head));
2443 		}
2444 		if (mapping)
2445 			__dec_node_page_state(page, NR_SHMEM_THPS);
2446 		spin_unlock(&pgdata->split_queue_lock);
2447 		__split_huge_page(page, list, flags);
2448 		ret = 0;
2449 	} else {
2450 		if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
2451 			pr_alert("total_mapcount: %u, page_count(): %u\n",
2452 					mapcount, count);
2453 			if (PageTail(page))
2454 				dump_page(head, NULL);
2455 			dump_page(page, "total_mapcount(head) > 0");
2456 			BUG();
2457 		}
2458 		spin_unlock(&pgdata->split_queue_lock);
2459 fail:		if (mapping)
2460 			spin_unlock(&mapping->tree_lock);
2461 		spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
2462 		unfreeze_page(head);
2463 		ret = -EBUSY;
2464 	}
2465 
2466 out_unlock:
2467 	if (anon_vma) {
2468 		anon_vma_unlock_write(anon_vma);
2469 		put_anon_vma(anon_vma);
2470 	}
2471 	if (mapping)
2472 		i_mmap_unlock_read(mapping);
2473 out:
2474 	count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
2475 	return ret;
2476 }
2477 
2478 void free_transhuge_page(struct page *page)
2479 {
2480 	struct pglist_data *pgdata = NODE_DATA(page_to_nid(page));
2481 	unsigned long flags;
2482 
2483 	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
2484 	if (!list_empty(page_deferred_list(page))) {
2485 		pgdata->split_queue_len--;
2486 		list_del(page_deferred_list(page));
2487 	}
2488 	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
2489 	free_compound_page(page);
2490 }
2491 
2492 void deferred_split_huge_page(struct page *page)
2493 {
2494 	struct pglist_data *pgdata = NODE_DATA(page_to_nid(page));
2495 	unsigned long flags;
2496 
2497 	VM_BUG_ON_PAGE(!PageTransHuge(page), page);
2498 
2499 	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
2500 	if (list_empty(page_deferred_list(page))) {
2501 		count_vm_event(THP_DEFERRED_SPLIT_PAGE);
2502 		list_add_tail(page_deferred_list(page), &pgdata->split_queue);
2503 		pgdata->split_queue_len++;
2504 	}
2505 	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
2506 }
2507 
2508 static unsigned long deferred_split_count(struct shrinker *shrink,
2509 		struct shrink_control *sc)
2510 {
2511 	struct pglist_data *pgdata = NODE_DATA(sc->nid);
2512 	return ACCESS_ONCE(pgdata->split_queue_len);
2513 }
2514 
2515 static unsigned long deferred_split_scan(struct shrinker *shrink,
2516 		struct shrink_control *sc)
2517 {
2518 	struct pglist_data *pgdata = NODE_DATA(sc->nid);
2519 	unsigned long flags;
2520 	LIST_HEAD(list), *pos, *next;
2521 	struct page *page;
2522 	int split = 0;
2523 
2524 	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
2525 	/* Take pin on all head pages to avoid freeing them under us */
2526 	list_for_each_safe(pos, next, &pgdata->split_queue) {
2527 		page = list_entry((void *)pos, struct page, mapping);
2528 		page = compound_head(page);
2529 		if (get_page_unless_zero(page)) {
2530 			list_move(page_deferred_list(page), &list);
2531 		} else {
2532 			/* We lost race with put_compound_page() */
2533 			list_del_init(page_deferred_list(page));
2534 			pgdata->split_queue_len--;
2535 		}
2536 		if (!--sc->nr_to_scan)
2537 			break;
2538 	}
2539 	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
2540 
2541 	list_for_each_safe(pos, next, &list) {
2542 		page = list_entry((void *)pos, struct page, mapping);
2543 		lock_page(page);
2544 		/* split_huge_page() removes page from list on success */
2545 		if (!split_huge_page(page))
2546 			split++;
2547 		unlock_page(page);
2548 		put_page(page);
2549 	}
2550 
2551 	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
2552 	list_splice_tail(&list, &pgdata->split_queue);
2553 	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
2554 
2555 	/*
2556 	 * Stop shrinker if we didn't split any page, but the queue is empty.
2557 	 * This can happen if pages were freed under us.
2558 	 */
2559 	if (!split && list_empty(&pgdata->split_queue))
2560 		return SHRINK_STOP;
2561 	return split;
2562 }
2563 
2564 static struct shrinker deferred_split_shrinker = {
2565 	.count_objects = deferred_split_count,
2566 	.scan_objects = deferred_split_scan,
2567 	.seeks = DEFAULT_SEEKS,
2568 	.flags = SHRINKER_NUMA_AWARE,
2569 };
2570 
2571 #ifdef CONFIG_DEBUG_FS
2572 static int split_huge_pages_set(void *data, u64 val)
2573 {
2574 	struct zone *zone;
2575 	struct page *page;
2576 	unsigned long pfn, max_zone_pfn;
2577 	unsigned long total = 0, split = 0;
2578 
2579 	if (val != 1)
2580 		return -EINVAL;
2581 
2582 	for_each_populated_zone(zone) {
2583 		max_zone_pfn = zone_end_pfn(zone);
2584 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
2585 			if (!pfn_valid(pfn))
2586 				continue;
2587 
2588 			page = pfn_to_page(pfn);
2589 			if (!get_page_unless_zero(page))
2590 				continue;
2591 
2592 			if (zone != page_zone(page))
2593 				goto next;
2594 
2595 			if (!PageHead(page) || PageHuge(page) || !PageLRU(page))
2596 				goto next;
2597 
2598 			total++;
2599 			lock_page(page);
2600 			if (!split_huge_page(page))
2601 				split++;
2602 			unlock_page(page);
2603 next:
2604 			put_page(page);
2605 		}
2606 	}
2607 
2608 	pr_info("%lu of %lu THP split\n", split, total);
2609 
2610 	return 0;
2611 }
2612 DEFINE_SIMPLE_ATTRIBUTE(split_huge_pages_fops, NULL, split_huge_pages_set,
2613 		"%llu\n");
2614 
2615 static int __init split_huge_pages_debugfs(void)
2616 {
2617 	void *ret;
2618 
2619 	ret = debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
2620 			&split_huge_pages_fops);
2621 	if (!ret)
2622 		pr_warn("Failed to create split_huge_pages in debugfs");
2623 	return 0;
2624 }
2625 late_initcall(split_huge_pages_debugfs);
2626 #endif
2627