xref: /linux/mm/huge_memory.c (revision 22fd411ac9853f4becb3db9860f6d0b8398cac44)
1 /*
2  *  Copyright (C) 2009  Red Hat, Inc.
3  *
4  *  This work is licensed under the terms of the GNU GPL, version 2. See
5  *  the COPYING file in the top-level directory.
6  */
7 
8 #include <linux/mm.h>
9 #include <linux/sched.h>
10 #include <linux/highmem.h>
11 #include <linux/hugetlb.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/rmap.h>
14 #include <linux/swap.h>
15 #include <linux/mm_inline.h>
16 #include <linux/kthread.h>
17 #include <linux/khugepaged.h>
18 #include <linux/freezer.h>
19 #include <linux/mman.h>
20 #include <asm/tlb.h>
21 #include <asm/pgalloc.h>
22 #include "internal.h"
23 
24 /*
25  * By default transparent hugepage support is enabled for all mappings
26  * and khugepaged scans all mappings. Defrag is only invoked by
27  * khugepaged hugepage allocations and by page faults inside
28  * MADV_HUGEPAGE regions to avoid the risk of slowing down short lived
29  * allocations.
30  */
31 unsigned long transparent_hugepage_flags __read_mostly =
32 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
33 	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
34 #endif
35 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
36 	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
37 #endif
38 	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
39 	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
40 
41 /* by default scan 8*512 ptes (or vmas) every 10 seconds */
42 static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
43 static unsigned int khugepaged_pages_collapsed;
44 static unsigned int khugepaged_full_scans;
45 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
46 /* during fragmentation poll the hugepage allocator once every minute */
47 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
48 static struct task_struct *khugepaged_thread __read_mostly;
49 static DEFINE_MUTEX(khugepaged_mutex);
50 static DEFINE_SPINLOCK(khugepaged_mm_lock);
51 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
52 /*
53  * By default collapse a hugepage if at least one pte is mapped, the
54  * same as would have happened had the vma been large enough at page
55  * fault time.
56  */
57 static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;
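/*
 * khugepaged_pages_to_scan, the two *_sleep_millisecs values and
 * khugepaged_max_ptes_none above are runtime tunables: they are exported
 * by the sysfs attributes registered further down, under
 * /sys/kernel/mm/transparent_hugepage/khugepaged/.
 */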
58 
59 static int khugepaged(void *none);
60 static int mm_slots_hash_init(void);
61 static int khugepaged_slab_init(void);
62 static void khugepaged_slab_free(void);
63 
64 #define MM_SLOTS_HASH_HEADS 1024
65 static struct hlist_head *mm_slots_hash __read_mostly;
66 static struct kmem_cache *mm_slot_cache __read_mostly;
67 
68 /**
69  * struct mm_slot - hash lookup from mm to mm_slot
70  * @hash: hash collision list
71  * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
72  * @mm: the mm that this information is valid for
73  */
74 struct mm_slot {
75 	struct hlist_node hash;
76 	struct list_head mm_node;
77 	struct mm_struct *mm;
78 };
79 
80 /**
81  * struct khugepaged_scan - cursor for scanning
82  * @mm_head: the head of the mm list to scan
83  * @mm_slot: the current mm_slot we are scanning
84  * @address: the next address inside that to be scanned
85  *
86  * There is only one khugepaged_scan instance of this cursor structure.
87  */
88 struct khugepaged_scan {
89 	struct list_head mm_head;
90 	struct mm_slot *mm_slot;
91 	unsigned long address;
92 } khugepaged_scan = {
93 	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
94 };
95 
96 
97 static int set_recommended_min_free_kbytes(void)
98 {
99 	struct zone *zone;
100 	int nr_zones = 0;
101 	unsigned long recommended_min;
102 	extern int min_free_kbytes;
103 
104 	if (!test_bit(TRANSPARENT_HUGEPAGE_FLAG,
105 		      &transparent_hugepage_flags) &&
106 	    !test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
107 		      &transparent_hugepage_flags))
108 		return 0;
109 
110 	for_each_populated_zone(zone)
111 		nr_zones++;
112 
113 	/* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
114 	recommended_min = pageblock_nr_pages * nr_zones * 2;
115 
116 	/*
117 	 * Make sure that on average at least two pageblocks are almost free
118 	 * of another type, one for a migratetype to fall back to and a
119 	 * second to avoid subsequent fallbacks of other types. There are 3
120 	 * MIGRATE_TYPES we care about.
121 	 */
122 	recommended_min += pageblock_nr_pages * nr_zones *
123 			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
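	/*
	 * Worked example, assuming x86_64 with 4k pages and 2M hugepages
	 * (pageblock_nr_pages == 512, MIGRATE_PCPTYPES == 3): with 3
	 * populated zones this yields 512*3*2 + 512*3*3*3 = 16896 pages,
	 * i.e. 66MB once shifted to kilobytes below, before the 5% of
	 * lowmem clamp is applied.
	 */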
124 
125 	/* never allow reserving more than 5% of the lowmem */
126 	recommended_min = min(recommended_min,
127 			      (unsigned long) nr_free_buffer_pages() / 20);
128 	recommended_min <<= (PAGE_SHIFT-10);
129 
130 	if (recommended_min > min_free_kbytes)
131 		min_free_kbytes = recommended_min;
132 	setup_per_zone_wmarks();
133 	return 0;
134 }
135 late_initcall(set_recommended_min_free_kbytes);
136 
137 static int start_khugepaged(void)
138 {
139 	int err = 0;
140 	if (khugepaged_enabled()) {
141 		int wakeup;
142 		if (unlikely(!mm_slot_cache || !mm_slots_hash)) {
143 			err = -ENOMEM;
144 			goto out;
145 		}
146 		mutex_lock(&khugepaged_mutex);
147 		if (!khugepaged_thread)
148 			khugepaged_thread = kthread_run(khugepaged, NULL,
149 							"khugepaged");
150 		if (unlikely(IS_ERR(khugepaged_thread))) {
151 			printk(KERN_ERR
152 			       "khugepaged: kthread_run(khugepaged) failed\n");
153 			err = PTR_ERR(khugepaged_thread);
154 			khugepaged_thread = NULL;
155 		}
156 		wakeup = !list_empty(&khugepaged_scan.mm_head);
157 		mutex_unlock(&khugepaged_mutex);
158 		if (wakeup)
159 			wake_up_interruptible(&khugepaged_wait);
160 
161 		set_recommended_min_free_kbytes();
162 	} else
163 		/* wakeup to exit */
164 		wake_up_interruptible(&khugepaged_wait);
165 out:
166 	return err;
167 }
168 
169 #ifdef CONFIG_SYSFS
170 
171 static ssize_t double_flag_show(struct kobject *kobj,
172 				struct kobj_attribute *attr, char *buf,
173 				enum transparent_hugepage_flag enabled,
174 				enum transparent_hugepage_flag req_madv)
175 {
176 	if (test_bit(enabled, &transparent_hugepage_flags)) {
177 		VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
178 		return sprintf(buf, "[always] madvise never\n");
179 	} else if (test_bit(req_madv, &transparent_hugepage_flags))
180 		return sprintf(buf, "always [madvise] never\n");
181 	else
182 		return sprintf(buf, "always madvise [never]\n");
183 }
184 static ssize_t double_flag_store(struct kobject *kobj,
185 				 struct kobj_attribute *attr,
186 				 const char *buf, size_t count,
187 				 enum transparent_hugepage_flag enabled,
188 				 enum transparent_hugepage_flag req_madv)
189 {
190 	if (!memcmp("always", buf,
191 		    min(sizeof("always")-1, count))) {
192 		set_bit(enabled, &transparent_hugepage_flags);
193 		clear_bit(req_madv, &transparent_hugepage_flags);
194 	} else if (!memcmp("madvise", buf,
195 			   min(sizeof("madvise")-1, count))) {
196 		clear_bit(enabled, &transparent_hugepage_flags);
197 		set_bit(req_madv, &transparent_hugepage_flags);
198 	} else if (!memcmp("never", buf,
199 			   min(sizeof("never")-1, count))) {
200 		clear_bit(enabled, &transparent_hugepage_flags);
201 		clear_bit(req_madv, &transparent_hugepage_flags);
202 	} else
203 		return -EINVAL;
204 
205 	return count;
206 }
207 
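/*
 * From userland the two double-flag policies are switched by writing one
 * of the strings parsed above, e.g.:
 *
 *	echo always  > /sys/kernel/mm/transparent_hugepage/enabled
 *	echo madvise > /sys/kernel/mm/transparent_hugepage/enabled
 *	echo never   > /sys/kernel/mm/transparent_hugepage/enabled
 */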
208 static ssize_t enabled_show(struct kobject *kobj,
209 			    struct kobj_attribute *attr, char *buf)
210 {
211 	return double_flag_show(kobj, attr, buf,
212 				TRANSPARENT_HUGEPAGE_FLAG,
213 				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
214 }
215 static ssize_t enabled_store(struct kobject *kobj,
216 			     struct kobj_attribute *attr,
217 			     const char *buf, size_t count)
218 {
219 	ssize_t ret;
220 
221 	ret = double_flag_store(kobj, attr, buf, count,
222 				TRANSPARENT_HUGEPAGE_FLAG,
223 				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
224 
225 	if (ret > 0) {
226 		int err = start_khugepaged();
227 		if (err)
228 			ret = err;
229 	}
230 
231 	if (ret > 0 &&
232 	    (test_bit(TRANSPARENT_HUGEPAGE_FLAG,
233 		      &transparent_hugepage_flags) ||
234 	     test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
235 		      &transparent_hugepage_flags)))
236 		set_recommended_min_free_kbytes();
237 
238 	return ret;
239 }
240 static struct kobj_attribute enabled_attr =
241 	__ATTR(enabled, 0644, enabled_show, enabled_store);
242 
243 static ssize_t single_flag_show(struct kobject *kobj,
244 				struct kobj_attribute *attr, char *buf,
245 				enum transparent_hugepage_flag flag)
246 {
247 	if (test_bit(flag, &transparent_hugepage_flags))
248 		return sprintf(buf, "[yes] no\n");
249 	else
250 		return sprintf(buf, "yes [no]\n");
251 }
252 static ssize_t single_flag_store(struct kobject *kobj,
253 				 struct kobj_attribute *attr,
254 				 const char *buf, size_t count,
255 				 enum transparent_hugepage_flag flag)
256 {
257 	if (!memcmp("yes", buf,
258 		    min(sizeof("yes")-1, count))) {
259 		set_bit(flag, &transparent_hugepage_flags);
260 	} else if (!memcmp("no", buf,
261 			   min(sizeof("no")-1, count))) {
262 		clear_bit(flag, &transparent_hugepage_flags);
263 	} else
264 		return -EINVAL;
265 
266 	return count;
267 }
268 
269 /*
270  * Currently "defrag" only controls whether __GFP_WAIT is set for the
271  * allocation. A blind __GFP_REPEAT would be too aggressive: it's never
272  * worth swapping tons of memory just to allocate one more hugepage.
273  */
274 static ssize_t defrag_show(struct kobject *kobj,
275 			   struct kobj_attribute *attr, char *buf)
276 {
277 	return double_flag_show(kobj, attr, buf,
278 				TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
279 				TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
280 }
281 static ssize_t defrag_store(struct kobject *kobj,
282 			    struct kobj_attribute *attr,
283 			    const char *buf, size_t count)
284 {
285 	return double_flag_store(kobj, attr, buf, count,
286 				 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
287 				 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
288 }
289 static struct kobj_attribute defrag_attr =
290 	__ATTR(defrag, 0644, defrag_show, defrag_store);
291 
292 #ifdef CONFIG_DEBUG_VM
293 static ssize_t debug_cow_show(struct kobject *kobj,
294 				struct kobj_attribute *attr, char *buf)
295 {
296 	return single_flag_show(kobj, attr, buf,
297 				TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
298 }
299 static ssize_t debug_cow_store(struct kobject *kobj,
300 			       struct kobj_attribute *attr,
301 			       const char *buf, size_t count)
302 {
303 	return single_flag_store(kobj, attr, buf, count,
304 				 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
305 }
306 static struct kobj_attribute debug_cow_attr =
307 	__ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
308 #endif /* CONFIG_DEBUG_VM */
309 
310 static struct attribute *hugepage_attr[] = {
311 	&enabled_attr.attr,
312 	&defrag_attr.attr,
313 #ifdef CONFIG_DEBUG_VM
314 	&debug_cow_attr.attr,
315 #endif
316 	NULL,
317 };
318 
319 static struct attribute_group hugepage_attr_group = {
320 	.attrs = hugepage_attr,
321 };
322 
323 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
324 					 struct kobj_attribute *attr,
325 					 char *buf)
326 {
327 	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
328 }
329 
330 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
331 					  struct kobj_attribute *attr,
332 					  const char *buf, size_t count)
333 {
334 	unsigned long msecs;
335 	int err;
336 
337 	err = strict_strtoul(buf, 10, &msecs);
338 	if (err || msecs > UINT_MAX)
339 		return -EINVAL;
340 
341 	khugepaged_scan_sleep_millisecs = msecs;
342 	wake_up_interruptible(&khugepaged_wait);
343 
344 	return count;
345 }
346 static struct kobj_attribute scan_sleep_millisecs_attr =
347 	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
348 	       scan_sleep_millisecs_store);
349 
350 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
351 					  struct kobj_attribute *attr,
352 					  char *buf)
353 {
354 	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
355 }
356 
357 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
358 					   struct kobj_attribute *attr,
359 					   const char *buf, size_t count)
360 {
361 	unsigned long msecs;
362 	int err;
363 
364 	err = strict_strtoul(buf, 10, &msecs);
365 	if (err || msecs > UINT_MAX)
366 		return -EINVAL;
367 
368 	khugepaged_alloc_sleep_millisecs = msecs;
369 	wake_up_interruptible(&khugepaged_wait);
370 
371 	return count;
372 }
373 static struct kobj_attribute alloc_sleep_millisecs_attr =
374 	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
375 	       alloc_sleep_millisecs_store);
376 
377 static ssize_t pages_to_scan_show(struct kobject *kobj,
378 				  struct kobj_attribute *attr,
379 				  char *buf)
380 {
381 	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
382 }
383 static ssize_t pages_to_scan_store(struct kobject *kobj,
384 				   struct kobj_attribute *attr,
385 				   const char *buf, size_t count)
386 {
387 	int err;
388 	unsigned long pages;
389 
390 	err = strict_strtoul(buf, 10, &pages);
391 	if (err || !pages || pages > UINT_MAX)
392 		return -EINVAL;
393 
394 	khugepaged_pages_to_scan = pages;
395 
396 	return count;
397 }
398 static struct kobj_attribute pages_to_scan_attr =
399 	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
400 	       pages_to_scan_store);
401 
402 static ssize_t pages_collapsed_show(struct kobject *kobj,
403 				    struct kobj_attribute *attr,
404 				    char *buf)
405 {
406 	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
407 }
408 static struct kobj_attribute pages_collapsed_attr =
409 	__ATTR_RO(pages_collapsed);
410 
411 static ssize_t full_scans_show(struct kobject *kobj,
412 			       struct kobj_attribute *attr,
413 			       char *buf)
414 {
415 	return sprintf(buf, "%u\n", khugepaged_full_scans);
416 }
417 static struct kobj_attribute full_scans_attr =
418 	__ATTR_RO(full_scans);
419 
420 static ssize_t khugepaged_defrag_show(struct kobject *kobj,
421 				      struct kobj_attribute *attr, char *buf)
422 {
423 	return single_flag_show(kobj, attr, buf,
424 				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
425 }
426 static ssize_t khugepaged_defrag_store(struct kobject *kobj,
427 				       struct kobj_attribute *attr,
428 				       const char *buf, size_t count)
429 {
430 	return single_flag_store(kobj, attr, buf, count,
431 				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
432 }
433 static struct kobj_attribute khugepaged_defrag_attr =
434 	__ATTR(defrag, 0644, khugepaged_defrag_show,
435 	       khugepaged_defrag_store);
436 
437 /*
438  * max_ptes_none controls whether khugepaged should collapse hugepages
439  * over unmapped ptes, which in turn can increase the memory footprint
440  * of the vmas. When max_ptes_none is 0, khugepaged will not reduce
441  * the available free memory in the system as it runs. Increasing
442  * max_ptes_none will instead potentially reduce the free memory in
443  * the system during the khugepaged scan.
444  */
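/*
 * For example, with 2M hugepages (HPAGE_PMD_NR == 512) the default of
 * HPAGE_PMD_NR-1 == 511 lets khugepaged collapse a range with a single
 * mapped pte, instantiating up to 511 previously unmapped 4k pages
 * (almost 2M) in one collapse, while 0 only collapses fully mapped
 * ranges.
 */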
445 static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
446 					     struct kobj_attribute *attr,
447 					     char *buf)
448 {
449 	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
450 }
451 static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
452 					      struct kobj_attribute *attr,
453 					      const char *buf, size_t count)
454 {
455 	int err;
456 	unsigned long max_ptes_none;
457 
458 	err = strict_strtoul(buf, 10, &max_ptes_none);
459 	if (err || max_ptes_none > HPAGE_PMD_NR-1)
460 		return -EINVAL;
461 
462 	khugepaged_max_ptes_none = max_ptes_none;
463 
464 	return count;
465 }
466 static struct kobj_attribute khugepaged_max_ptes_none_attr =
467 	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
468 	       khugepaged_max_ptes_none_store);
469 
470 static struct attribute *khugepaged_attr[] = {
471 	&khugepaged_defrag_attr.attr,
472 	&khugepaged_max_ptes_none_attr.attr,
473 	&pages_to_scan_attr.attr,
474 	&pages_collapsed_attr.attr,
475 	&full_scans_attr.attr,
476 	&scan_sleep_millisecs_attr.attr,
477 	&alloc_sleep_millisecs_attr.attr,
478 	NULL,
479 };
480 
481 static struct attribute_group khugepaged_attr_group = {
482 	.attrs = khugepaged_attr,
483 	.name = "khugepaged",
484 };
485 #endif /* CONFIG_SYSFS */
486 
487 static int __init hugepage_init(void)
488 {
489 	int err;
490 #ifdef CONFIG_SYSFS
491 	static struct kobject *hugepage_kobj;
492 #endif
493 
494 	err = -EINVAL;
495 	if (!has_transparent_hugepage()) {
496 		transparent_hugepage_flags = 0;
497 		goto out;
498 	}
499 
500 #ifdef CONFIG_SYSFS
501 	err = -ENOMEM;
502 	hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
503 	if (unlikely(!hugepage_kobj)) {
504 		printk(KERN_ERR "hugepage: failed kobject create\n");
505 		goto out;
506 	}
507 
508 	err = sysfs_create_group(hugepage_kobj, &hugepage_attr_group);
509 	if (err) {
510 		printk(KERN_ERR "hugepage: failed to register hugepage group\n");
511 		goto out;
512 	}
513 
514 	err = sysfs_create_group(hugepage_kobj, &khugepaged_attr_group);
515 	if (err) {
516 		printk(KERN_ERR "hugepage: failed to register khugepaged group\n");
517 		goto out;
518 	}
519 #endif
520 
521 	err = khugepaged_slab_init();
522 	if (err)
523 		goto out;
524 
525 	err = mm_slots_hash_init();
526 	if (err) {
527 		khugepaged_slab_free();
528 		goto out;
529 	}
530 
531 	/*
532 	 * By default disable transparent hugepages on smaller systems,
533 	 * where the extra memory used could hurt more than TLB overhead
534 	 * is likely to save.  The admin can still enable it through /sys.
535 	 */
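	/* (512 << (20 - PAGE_SHIFT)) is the number of pages in 512MB of RAM */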
536 	if (totalram_pages < (512 << (20 - PAGE_SHIFT)))
537 		transparent_hugepage_flags = 0;
538 
539 	start_khugepaged();
540 
541 	set_recommended_min_free_kbytes();
542 
543 out:
544 	return err;
545 }
546 module_init(hugepage_init)
547 
548 static int __init setup_transparent_hugepage(char *str)
549 {
550 	int ret = 0;
551 	if (!str)
552 		goto out;
553 	if (!strcmp(str, "always")) {
554 		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
555 			&transparent_hugepage_flags);
556 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
557 			  &transparent_hugepage_flags);
558 		ret = 1;
559 	} else if (!strcmp(str, "madvise")) {
560 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
561 			  &transparent_hugepage_flags);
562 		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
563 			&transparent_hugepage_flags);
564 		ret = 1;
565 	} else if (!strcmp(str, "never")) {
566 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
567 			  &transparent_hugepage_flags);
568 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
569 			  &transparent_hugepage_flags);
570 		ret = 1;
571 	}
572 out:
573 	if (!ret)
574 		printk(KERN_WARNING
575 		       "transparent_hugepage= cannot parse, ignored\n");
576 	return ret;
577 }
578 __setup("transparent_hugepage=", setup_transparent_hugepage);
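/*
 * The same three policies can be chosen at boot via the command line
 * parameter registered above, e.g. "transparent_hugepage=madvise".
 */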
579 
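/*
 * prepare_pmd_huge_pte() below and get_pmd_huge_pte() further down
 * implement a per-mm FIFO of preallocated page tables: a pte page is
 * deposited here whenever a huge pmd is established, and withdrawn again
 * when the huge pmd is zapped or split, so that splitting a huge pmd
 * never has to allocate memory.
 */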
580 static void prepare_pmd_huge_pte(pgtable_t pgtable,
581 				 struct mm_struct *mm)
582 {
583 	assert_spin_locked(&mm->page_table_lock);
584 
585 	/* FIFO */
586 	if (!mm->pmd_huge_pte)
587 		INIT_LIST_HEAD(&pgtable->lru);
588 	else
589 		list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
590 	mm->pmd_huge_pte = pgtable;
591 }
592 
593 static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
594 {
595 	if (likely(vma->vm_flags & VM_WRITE))
596 		pmd = pmd_mkwrite(pmd);
597 	return pmd;
598 }
599 
600 static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
601 					struct vm_area_struct *vma,
602 					unsigned long haddr, pmd_t *pmd,
603 					struct page *page)
604 {
605 	int ret = 0;
606 	pgtable_t pgtable;
607 
608 	VM_BUG_ON(!PageCompound(page));
609 	pgtable = pte_alloc_one(mm, haddr);
610 	if (unlikely(!pgtable)) {
611 		mem_cgroup_uncharge_page(page);
612 		put_page(page);
613 		return VM_FAULT_OOM;
614 	}
615 
616 	clear_huge_page(page, haddr, HPAGE_PMD_NR);
617 	__SetPageUptodate(page);
618 
619 	spin_lock(&mm->page_table_lock);
620 	if (unlikely(!pmd_none(*pmd))) {
621 		spin_unlock(&mm->page_table_lock);
622 		mem_cgroup_uncharge_page(page);
623 		put_page(page);
624 		pte_free(mm, pgtable);
625 	} else {
626 		pmd_t entry;
627 		entry = mk_pmd(page, vma->vm_page_prot);
628 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
629 		entry = pmd_mkhuge(entry);
630 		/*
631 		 * The spinlocking to take the lru_lock inside
632 		 * page_add_new_anon_rmap() acts as a full memory
633 		 * barrier to be sure clear_huge_page writes become
634 		 * visible before the set_pmd_at() write.
635 		 */
636 		page_add_new_anon_rmap(page, vma, haddr);
637 		set_pmd_at(mm, haddr, pmd, entry);
638 		prepare_pmd_huge_pte(pgtable, mm);
639 		add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
640 		spin_unlock(&mm->page_table_lock);
641 	}
642 
643 	return ret;
644 }
645 
646 static inline gfp_t alloc_hugepage_gfpmask(int defrag)
647 {
648 	return GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT);
649 }
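/*
 * With "defrag" enabled the full GFP_TRANSHUGE mask is used, so the
 * allocation may block in reclaim/compaction; with it disabled
 * __GFP_WAIT is cleared and the hugepage allocation fails fast instead
 * of stalling the fault path.
 */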
650 
651 static inline struct page *alloc_hugepage_vma(int defrag,
652 					      struct vm_area_struct *vma,
653 					      unsigned long haddr)
654 {
655 	return alloc_pages_vma(alloc_hugepage_gfpmask(defrag),
656 			       HPAGE_PMD_ORDER, vma, haddr);
657 }
658 
659 #ifndef CONFIG_NUMA
660 static inline struct page *alloc_hugepage(int defrag)
661 {
662 	return alloc_pages(alloc_hugepage_gfpmask(defrag),
663 			   HPAGE_PMD_ORDER);
664 }
665 #endif
666 
667 int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
668 			       unsigned long address, pmd_t *pmd,
669 			       unsigned int flags)
670 {
671 	struct page *page;
672 	unsigned long haddr = address & HPAGE_PMD_MASK;
673 	pte_t *pte;
674 
675 	if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) {
676 		if (unlikely(anon_vma_prepare(vma)))
677 			return VM_FAULT_OOM;
678 		if (unlikely(khugepaged_enter(vma)))
679 			return VM_FAULT_OOM;
680 		page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
681 					  vma, haddr);
682 		if (unlikely(!page))
683 			goto out;
684 		if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
685 			put_page(page);
686 			goto out;
687 		}
688 
689 		return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page);
690 	}
691 out:
692 	/*
693 	 * Use __pte_alloc instead of pte_alloc_map, because we can't
694 	 * run pte_offset_map on the pmd if a huge pmd could
695 	 * materialize under us from a different thread.
696 	 */
697 	if (unlikely(__pte_alloc(mm, vma, pmd, address)))
698 		return VM_FAULT_OOM;
699 	/* if a huge pmd materialized under us just retry later */
700 	if (unlikely(pmd_trans_huge(*pmd)))
701 		return 0;
702 	/*
703 	 * A regular pmd is established and it can't morph into a huge pmd
704 	 * from under us anymore at this point because we hold the mmap_sem
705 	 * read mode and khugepaged takes it in write mode. So now it's
706 	 * safe to run pte_offset_map().
707 	 */
708 	pte = pte_offset_map(pmd, address);
709 	return handle_pte_fault(mm, vma, address, pte, pmd, flags);
710 }
711 
712 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
713 		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
714 		  struct vm_area_struct *vma)
715 {
716 	struct page *src_page;
717 	pmd_t pmd;
718 	pgtable_t pgtable;
719 	int ret;
720 
721 	ret = -ENOMEM;
722 	pgtable = pte_alloc_one(dst_mm, addr);
723 	if (unlikely(!pgtable))
724 		goto out;
725 
726 	spin_lock(&dst_mm->page_table_lock);
727 	spin_lock_nested(&src_mm->page_table_lock, SINGLE_DEPTH_NESTING);
728 
729 	ret = -EAGAIN;
730 	pmd = *src_pmd;
731 	if (unlikely(!pmd_trans_huge(pmd))) {
732 		pte_free(dst_mm, pgtable);
733 		goto out_unlock;
734 	}
735 	if (unlikely(pmd_trans_splitting(pmd))) {
736 		/* split huge page running from under us */
737 		spin_unlock(&src_mm->page_table_lock);
738 		spin_unlock(&dst_mm->page_table_lock);
739 		pte_free(dst_mm, pgtable);
740 
741 		wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
742 		goto out;
743 	}
744 	src_page = pmd_page(pmd);
745 	VM_BUG_ON(!PageHead(src_page));
746 	get_page(src_page);
747 	page_dup_rmap(src_page);
748 	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
749 
750 	pmdp_set_wrprotect(src_mm, addr, src_pmd);
751 	pmd = pmd_mkold(pmd_wrprotect(pmd));
752 	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
753 	prepare_pmd_huge_pte(pgtable, dst_mm);
754 
755 	ret = 0;
756 out_unlock:
757 	spin_unlock(&src_mm->page_table_lock);
758 	spin_unlock(&dst_mm->page_table_lock);
759 out:
760 	return ret;
761 }
762 
763 /* no "address" argument, so this destroys the page coloring of some archs */
764 pgtable_t get_pmd_huge_pte(struct mm_struct *mm)
765 {
766 	pgtable_t pgtable;
767 
768 	assert_spin_locked(&mm->page_table_lock);
769 
770 	/* FIFO */
771 	pgtable = mm->pmd_huge_pte;
772 	if (list_empty(&pgtable->lru))
773 		mm->pmd_huge_pte = NULL;
774 	else {
775 		mm->pmd_huge_pte = list_entry(pgtable->lru.next,
776 					      struct page, lru);
777 		list_del(&pgtable->lru);
778 	}
779 	return pgtable;
780 }
781 
782 static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
783 					struct vm_area_struct *vma,
784 					unsigned long address,
785 					pmd_t *pmd, pmd_t orig_pmd,
786 					struct page *page,
787 					unsigned long haddr)
788 {
789 	pgtable_t pgtable;
790 	pmd_t _pmd;
791 	int ret = 0, i;
792 	struct page **pages;
793 
794 	pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
795 			GFP_KERNEL);
796 	if (unlikely(!pages)) {
797 		ret |= VM_FAULT_OOM;
798 		goto out;
799 	}
800 
801 	for (i = 0; i < HPAGE_PMD_NR; i++) {
802 		pages[i] = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
803 					  vma, address);
804 		if (unlikely(!pages[i] ||
805 			     mem_cgroup_newpage_charge(pages[i], mm,
806 						       GFP_KERNEL))) {
807 			if (pages[i])
808 				put_page(pages[i]);
809 			mem_cgroup_uncharge_start();
810 			while (--i >= 0) {
811 				mem_cgroup_uncharge_page(pages[i]);
812 				put_page(pages[i]);
813 			}
814 			mem_cgroup_uncharge_end();
815 			kfree(pages);
816 			ret |= VM_FAULT_OOM;
817 			goto out;
818 		}
819 	}
820 
821 	for (i = 0; i < HPAGE_PMD_NR; i++) {
822 		copy_user_highpage(pages[i], page + i,
823 				   haddr + PAGE_SIZE*i, vma);
824 		__SetPageUptodate(pages[i]);
825 		cond_resched();
826 	}
827 
828 	spin_lock(&mm->page_table_lock);
829 	if (unlikely(!pmd_same(*pmd, orig_pmd)))
830 		goto out_free_pages;
831 	VM_BUG_ON(!PageHead(page));
832 
833 	pmdp_clear_flush_notify(vma, haddr, pmd);
834 	/* leave pmd empty until pte is filled */
835 
836 	pgtable = get_pmd_huge_pte(mm);
837 	pmd_populate(mm, &_pmd, pgtable);
838 
839 	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
840 		pte_t *pte, entry;
841 		entry = mk_pte(pages[i], vma->vm_page_prot);
842 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
843 		page_add_new_anon_rmap(pages[i], vma, haddr);
844 		pte = pte_offset_map(&_pmd, haddr);
845 		VM_BUG_ON(!pte_none(*pte));
846 		set_pte_at(mm, haddr, pte, entry);
847 		pte_unmap(pte);
848 	}
849 	kfree(pages);
850 
851 	mm->nr_ptes++;
852 	smp_wmb(); /* make pte visible before pmd */
853 	pmd_populate(mm, pmd, pgtable);
854 	page_remove_rmap(page);
855 	spin_unlock(&mm->page_table_lock);
856 
857 	ret |= VM_FAULT_WRITE;
858 	put_page(page);
859 
860 out:
861 	return ret;
862 
863 out_free_pages:
864 	spin_unlock(&mm->page_table_lock);
865 	mem_cgroup_uncharge_start();
866 	for (i = 0; i < HPAGE_PMD_NR; i++) {
867 		mem_cgroup_uncharge_page(pages[i]);
868 		put_page(pages[i]);
869 	}
870 	mem_cgroup_uncharge_end();
871 	kfree(pages);
872 	goto out;
873 }
874 
875 int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
876 			unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
877 {
878 	int ret = 0;
879 	struct page *page, *new_page;
880 	unsigned long haddr;
881 
882 	VM_BUG_ON(!vma->anon_vma);
883 	spin_lock(&mm->page_table_lock);
884 	if (unlikely(!pmd_same(*pmd, orig_pmd)))
885 		goto out_unlock;
886 
887 	page = pmd_page(orig_pmd);
888 	VM_BUG_ON(!PageCompound(page) || !PageHead(page));
889 	haddr = address & HPAGE_PMD_MASK;
890 	if (page_mapcount(page) == 1) {
891 		pmd_t entry;
892 		entry = pmd_mkyoung(orig_pmd);
893 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
894 		if (pmdp_set_access_flags(vma, haddr, pmd, entry,  1))
895 			update_mmu_cache(vma, address, entry);
896 		ret |= VM_FAULT_WRITE;
897 		goto out_unlock;
898 	}
899 	get_page(page);
900 	spin_unlock(&mm->page_table_lock);
901 
902 	if (transparent_hugepage_enabled(vma) &&
903 	    !transparent_hugepage_debug_cow())
904 		new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
905 					      vma, haddr);
906 	else
907 		new_page = NULL;
908 
909 	if (unlikely(!new_page)) {
910 		ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
911 						   pmd, orig_pmd, page, haddr);
912 		put_page(page);
913 		goto out;
914 	}
915 
916 	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
917 		put_page(new_page);
918 		put_page(page);
919 		ret |= VM_FAULT_OOM;
920 		goto out;
921 	}
922 
923 	copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
924 	__SetPageUptodate(new_page);
925 
926 	spin_lock(&mm->page_table_lock);
927 	put_page(page);
928 	if (unlikely(!pmd_same(*pmd, orig_pmd))) {
929 		mem_cgroup_uncharge_page(new_page);
930 		put_page(new_page);
931 	} else {
932 		pmd_t entry;
933 		VM_BUG_ON(!PageHead(page));
934 		entry = mk_pmd(new_page, vma->vm_page_prot);
935 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
936 		entry = pmd_mkhuge(entry);
937 		pmdp_clear_flush_notify(vma, haddr, pmd);
938 		page_add_new_anon_rmap(new_page, vma, haddr);
939 		set_pmd_at(mm, haddr, pmd, entry);
940 		update_mmu_cache(vma, address, entry);
941 		page_remove_rmap(page);
942 		put_page(page);
943 		ret |= VM_FAULT_WRITE;
944 	}
945 out_unlock:
946 	spin_unlock(&mm->page_table_lock);
947 out:
948 	return ret;
949 }
950 
951 struct page *follow_trans_huge_pmd(struct mm_struct *mm,
952 				   unsigned long addr,
953 				   pmd_t *pmd,
954 				   unsigned int flags)
955 {
956 	struct page *page = NULL;
957 
958 	assert_spin_locked(&mm->page_table_lock);
959 
960 	if (flags & FOLL_WRITE && !pmd_write(*pmd))
961 		goto out;
962 
963 	page = pmd_page(*pmd);
964 	VM_BUG_ON(!PageHead(page));
965 	if (flags & FOLL_TOUCH) {
966 		pmd_t _pmd;
967 		/*
968 		 * We should set the dirty bit only for FOLL_WRITE but
969 		 * for now the dirty bit in the pmd is meaningless.
970 		 * If the dirty bit ever becomes meaningful and we
971 		 * only set it with FOLL_WRITE, an atomic set_bit
972 		 * will be required on the pmd to set the young bit,
973 		 * instead of the current set_pmd_at.
974 		 */
975 		_pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
976 		set_pmd_at(mm, addr & HPAGE_PMD_MASK, pmd, _pmd);
977 	}
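	/*
	 * Return the 4k subpage of the hugepage that addr falls in: the
	 * offset below HPAGE_PMD_MASK selects the tail page index, e.g.
	 * with 2M hugepages an addr ending in 0x3000 yields page + 3.
	 */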
978 	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
979 	VM_BUG_ON(!PageCompound(page));
980 	if (flags & FOLL_GET)
981 		get_page(page);
982 
983 out:
984 	return page;
985 }
986 
987 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
988 		 pmd_t *pmd)
989 {
990 	int ret = 0;
991 
992 	spin_lock(&tlb->mm->page_table_lock);
993 	if (likely(pmd_trans_huge(*pmd))) {
994 		if (unlikely(pmd_trans_splitting(*pmd))) {
995 			spin_unlock(&tlb->mm->page_table_lock);
996 			wait_split_huge_page(vma->anon_vma,
997 					     pmd);
998 		} else {
999 			struct page *page;
1000 			pgtable_t pgtable;
1001 			pgtable = get_pmd_huge_pte(tlb->mm);
1002 			page = pmd_page(*pmd);
1003 			pmd_clear(pmd);
1004 			page_remove_rmap(page);
1005 			VM_BUG_ON(page_mapcount(page) < 0);
1006 			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
1007 			VM_BUG_ON(!PageHead(page));
1008 			spin_unlock(&tlb->mm->page_table_lock);
1009 			tlb_remove_page(tlb, page);
1010 			pte_free(tlb->mm, pgtable);
1011 			ret = 1;
1012 		}
1013 	} else
1014 		spin_unlock(&tlb->mm->page_table_lock);
1015 
1016 	return ret;
1017 }
1018 
1019 int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1020 		unsigned long addr, unsigned long end,
1021 		unsigned char *vec)
1022 {
1023 	int ret = 0;
1024 
1025 	spin_lock(&vma->vm_mm->page_table_lock);
1026 	if (likely(pmd_trans_huge(*pmd))) {
1027 		ret = !pmd_trans_splitting(*pmd);
1028 		spin_unlock(&vma->vm_mm->page_table_lock);
1029 		if (unlikely(!ret))
1030 			wait_split_huge_page(vma->anon_vma, pmd);
1031 		else {
1032 			/*
1033 			 * All logical pages in the range are present
1034 			 * if backed by a huge page.
1035 			 */
1036 			memset(vec, 1, (end - addr) >> PAGE_SHIFT);
1037 		}
1038 	} else
1039 		spin_unlock(&vma->vm_mm->page_table_lock);
1040 
1041 	return ret;
1042 }
1043 
1044 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1045 		unsigned long addr, pgprot_t newprot)
1046 {
1047 	struct mm_struct *mm = vma->vm_mm;
1048 	int ret = 0;
1049 
1050 	spin_lock(&mm->page_table_lock);
1051 	if (likely(pmd_trans_huge(*pmd))) {
1052 		if (unlikely(pmd_trans_splitting(*pmd))) {
1053 			spin_unlock(&mm->page_table_lock);
1054 			wait_split_huge_page(vma->anon_vma, pmd);
1055 		} else {
1056 			pmd_t entry;
1057 
1058 			entry = pmdp_get_and_clear(mm, addr, pmd);
1059 			entry = pmd_modify(entry, newprot);
1060 			set_pmd_at(mm, addr, pmd, entry);
1061 			spin_unlock(&vma->vm_mm->page_table_lock);
1062 			flush_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
1063 			ret = 1;
1064 		}
1065 	} else
1066 		spin_unlock(&vma->vm_mm->page_table_lock);
1067 
1068 	return ret;
1069 }
1070 
1071 pmd_t *page_check_address_pmd(struct page *page,
1072 			      struct mm_struct *mm,
1073 			      unsigned long address,
1074 			      enum page_check_address_pmd_flag flag)
1075 {
1076 	pgd_t *pgd;
1077 	pud_t *pud;
1078 	pmd_t *pmd, *ret = NULL;
1079 
1080 	if (address & ~HPAGE_PMD_MASK)
1081 		goto out;
1082 
1083 	pgd = pgd_offset(mm, address);
1084 	if (!pgd_present(*pgd))
1085 		goto out;
1086 
1087 	pud = pud_offset(pgd, address);
1088 	if (!pud_present(*pud))
1089 		goto out;
1090 
1091 	pmd = pmd_offset(pud, address);
1092 	if (pmd_none(*pmd))
1093 		goto out;
1094 	if (pmd_page(*pmd) != page)
1095 		goto out;
1096 	/*
1097 	 * split_vma() may create temporary aliased mappings. There is
1098 	 * no risk as long as all huge pmd are found and have their
1099 	 * splitting bit set before __split_huge_page_refcount
1100 	 * runs. Finding the same huge pmd more than once during the
1101 	 * same rmap walk is not a problem.
1102 	 */
1103 	if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
1104 	    pmd_trans_splitting(*pmd))
1105 		goto out;
1106 	if (pmd_trans_huge(*pmd)) {
1107 		VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
1108 			  !pmd_trans_splitting(*pmd));
1109 		ret = pmd;
1110 	}
1111 out:
1112 	return ret;
1113 }
1114 
1115 static int __split_huge_page_splitting(struct page *page,
1116 				       struct vm_area_struct *vma,
1117 				       unsigned long address)
1118 {
1119 	struct mm_struct *mm = vma->vm_mm;
1120 	pmd_t *pmd;
1121 	int ret = 0;
1122 
1123 	spin_lock(&mm->page_table_lock);
1124 	pmd = page_check_address_pmd(page, mm, address,
1125 				     PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG);
1126 	if (pmd) {
1127 		/*
1128 		 * We can't temporarily set the pmd to null in order
1129 		 * to split it; the pmd must remain marked huge at all
1130 		 * times or the VM won't take the pmd_trans_huge paths
1131 		 * and it won't wait on the anon_vma->root->lock to
1132 		 * serialize against split_huge_page*.
1133 		 */
1134 		pmdp_splitting_flush_notify(vma, address, pmd);
1135 		ret = 1;
1136 	}
1137 	spin_unlock(&mm->page_table_lock);
1138 
1139 	return ret;
1140 }
1141 
1142 static void __split_huge_page_refcount(struct page *page)
1143 {
1144 	int i;
1145 	unsigned long head_index = page->index;
1146 	struct zone *zone = page_zone(page);
1147 	int zonestat;
1148 
1149 	/* prevent PageLRU from going away under us, and freeze the lru stats */
1150 	spin_lock_irq(&zone->lru_lock);
1151 	compound_lock(page);
1152 
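	/*
	 * The loop below redistributes the head page's references: each
	 * tail page gains page_mapcount(page) + 1 references (one per
	 * mapping plus one temporary one dropped by the put_page loop
	 * after the split), while the head gives up the extra references
	 * that gup took directly on the tail pages.
	 */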
1153 	for (i = 1; i < HPAGE_PMD_NR; i++) {
1154 		struct page *page_tail = page + i;
1155 
1156 		/* tail_page->_count cannot change */
1157 		atomic_sub(atomic_read(&page_tail->_count), &page->_count);
1158 		BUG_ON(page_count(page) <= 0);
1159 		atomic_add(page_mapcount(page) + 1, &page_tail->_count);
1160 		BUG_ON(atomic_read(&page_tail->_count) <= 0);
1161 
1162 		/* after clearing PageTail the gup refcount can be released */
1163 		smp_mb();
1164 
1165 		page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1166 		page_tail->flags |= (page->flags &
1167 				     ((1L << PG_referenced) |
1168 				      (1L << PG_swapbacked) |
1169 				      (1L << PG_mlocked) |
1170 				      (1L << PG_uptodate)));
1171 		page_tail->flags |= (1L << PG_dirty);
1172 
1173 		/*
1174 		 * 1) clear PageTail before overwriting first_page
1175 		 * 2) clear PageTail before clearing PageHead for VM_BUG_ON
1176 		 */
1177 		smp_wmb();
1178 
1179 		/*
1180 		 * __split_huge_page_splitting() already set the
1181 		 * splitting bit in all pmd that could map this
1182 		 * hugepage, that will ensure no CPU can alter the
1183 		 * mapcount on the head page. The mapcount is only
1184 		 * accounted in the head page and it has to be
1185 		 * transferred to all tail pages in the below code. So
1186 		 * for this code to be safe, the mapcount can't change
1187 		 * during the split. But that doesn't mean userland can't
1188 		 * keep changing and reading the page contents while
1189 		 * we transfer the mapcount, so the pmd splitting
1190 		 * status is achieved setting a reserved bit in the
1191 		 * pmd, not by clearing the present bit.
1192 		 */
1193 		BUG_ON(page_mapcount(page_tail));
1194 		page_tail->_mapcount = page->_mapcount;
1195 
1196 		BUG_ON(page_tail->mapping);
1197 		page_tail->mapping = page->mapping;
1198 
1199 		page_tail->index = ++head_index;
1200 
1201 		BUG_ON(!PageAnon(page_tail));
1202 		BUG_ON(!PageUptodate(page_tail));
1203 		BUG_ON(!PageDirty(page_tail));
1204 		BUG_ON(!PageSwapBacked(page_tail));
1205 
1206 		mem_cgroup_split_huge_fixup(page, page_tail);
1207 
1208 		lru_add_page_tail(zone, page, page_tail);
1209 	}
1210 
1211 	__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
1212 	__mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
1213 
1214 	/*
1215 	 * A hugepage counts for HPAGE_PMD_NR pages on the LRU statistics,
1216 	 * so adjust those appropriately if this page is on the LRU.
1217 	 */
1218 	if (PageLRU(page)) {
1219 		zonestat = NR_LRU_BASE + page_lru(page);
1220 		__mod_zone_page_state(zone, zonestat, -(HPAGE_PMD_NR-1));
1221 	}
1222 
1223 	ClearPageCompound(page);
1224 	compound_unlock(page);
1225 	spin_unlock_irq(&zone->lru_lock);
1226 
1227 	for (i = 1; i < HPAGE_PMD_NR; i++) {
1228 		struct page *page_tail = page + i;
1229 		BUG_ON(page_count(page_tail) <= 0);
1230 		/*
1231 		 * Tail pages may be freed if there wasn't any mapping,
1232 		 * e.g. if add_to_swap() is running on an lru page that
1233 		 * had its mapping zapped. Freeing these pages requires
1234 		 * taking the lru_lock, so we do the put_page of the
1235 		 * tail pages after the split is complete.
1236 		 */
1237 		put_page(page_tail);
1238 	}
1239 
1240 	/*
1241 	 * Only the head page (now become a regular page) is required
1242 	 * to be pinned by the caller.
1243 	 */
1244 	BUG_ON(page_count(page) <= 0);
1245 }
1246 
1247 static int __split_huge_page_map(struct page *page,
1248 				 struct vm_area_struct *vma,
1249 				 unsigned long address)
1250 {
1251 	struct mm_struct *mm = vma->vm_mm;
1252 	pmd_t *pmd, _pmd;
1253 	int ret = 0, i;
1254 	pgtable_t pgtable;
1255 	unsigned long haddr;
1256 
1257 	spin_lock(&mm->page_table_lock);
1258 	pmd = page_check_address_pmd(page, mm, address,
1259 				     PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
1260 	if (pmd) {
1261 		pgtable = get_pmd_huge_pte(mm);
1262 		pmd_populate(mm, &_pmd, pgtable);
1263 
1264 		for (i = 0, haddr = address; i < HPAGE_PMD_NR;
1265 		     i++, haddr += PAGE_SIZE) {
1266 			pte_t *pte, entry;
1267 			BUG_ON(PageCompound(page+i));
1268 			entry = mk_pte(page + i, vma->vm_page_prot);
1269 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1270 			if (!pmd_write(*pmd))
1271 				entry = pte_wrprotect(entry);
1272 			else
1273 				BUG_ON(page_mapcount(page) != 1);
1274 			if (!pmd_young(*pmd))
1275 				entry = pte_mkold(entry);
1276 			pte = pte_offset_map(&_pmd, haddr);
1277 			BUG_ON(!pte_none(*pte));
1278 			set_pte_at(mm, haddr, pte, entry);
1279 			pte_unmap(pte);
1280 		}
1281 
1282 		mm->nr_ptes++;
1283 		smp_wmb(); /* make pte visible before pmd */
1284 		/*
1285 		 * Up to this point the pmd is present and huge and
1286 		 * userland has the whole access to the hugepage
1287 		 * during the split (which happens in place). If we
1288 		 * overwrite the pmd with the not-huge version
1289 		 * pointing to the pte here (which of course we could
1290 		 * if all CPUs were bug free), userland could trigger
1291 		 * a small page size TLB miss on the small sized TLB
1292 		 * while the hugepage TLB entry is still established
1293 		 * in the huge TLB. Some CPU doesn't like that. See
1294 		 * http://support.amd.com/us/Processor_TechDocs/41322.pdf,
1295 		 * Erratum 383 on page 93. Intel should be safe but
1296 		 * also warns that it's only safe if the permission
1297 		 * and cache attributes of the two entries loaded in
1298 		 * the two TLBs are identical (which should be the case
1299 		 * here). But it is generally safer to never allow
1300 		 * small and huge TLB entries for the same virtual
1301 		 * address to be loaded simultaneously. So instead of
1302 		 * doing "pmd_populate(); flush_tlb_range();" we first
1303 		 * mark the current pmd notpresent (atomically because
1304 		 * here the pmd_trans_huge and pmd_trans_splitting
1305 		 * must remain set at all times on the pmd until the
1306 		 * split is complete for this pmd), then we flush the
1307 		 * SMP TLB and finally we write the non-huge version
1308 		 * of the pmd entry with pmd_populate.
1309 		 */
1310 		set_pmd_at(mm, address, pmd, pmd_mknotpresent(*pmd));
1311 		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
1312 		pmd_populate(mm, pmd, pgtable);
1313 		ret = 1;
1314 	}
1315 	spin_unlock(&mm->page_table_lock);
1316 
1317 	return ret;
1318 }
1319 
1320 /* must be called with anon_vma->root->lock held */
1321 static void __split_huge_page(struct page *page,
1322 			      struct anon_vma *anon_vma)
1323 {
1324 	int mapcount, mapcount2;
1325 	struct anon_vma_chain *avc;
1326 
1327 	BUG_ON(!PageHead(page));
1328 	BUG_ON(PageTail(page));
1329 
1330 	mapcount = 0;
1331 	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
1332 		struct vm_area_struct *vma = avc->vma;
1333 		unsigned long addr = vma_address(page, vma);
1334 		BUG_ON(is_vma_temporary_stack(vma));
1335 		if (addr == -EFAULT)
1336 			continue;
1337 		mapcount += __split_huge_page_splitting(page, vma, addr);
1338 	}
1339 	/*
1340 	 * It is critical that new vmas are added to the tail of the
1341 	 * anon_vma list. This guarantees that if copy_huge_pmd() runs
1342 	 * and establishes a child pmd before
1343 	 * __split_huge_page_splitting() freezes the parent pmd (so if
1344 	 * we fail to prevent copy_huge_pmd() from running until the
1345 	 * whole __split_huge_page() is complete), we will still see
1346 	 * the newly established pmd of the child later during the
1347 	 * walk, to be able to set it as pmd_trans_splitting too.
1348 	 */
1349 	if (mapcount != page_mapcount(page))
1350 		printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1351 		       mapcount, page_mapcount(page));
1352 	BUG_ON(mapcount != page_mapcount(page));
1353 
1354 	__split_huge_page_refcount(page);
1355 
1356 	mapcount2 = 0;
1357 	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
1358 		struct vm_area_struct *vma = avc->vma;
1359 		unsigned long addr = vma_address(page, vma);
1360 		BUG_ON(is_vma_temporary_stack(vma));
1361 		if (addr == -EFAULT)
1362 			continue;
1363 		mapcount2 += __split_huge_page_map(page, vma, addr);
1364 	}
1365 	if (mapcount != mapcount2)
1366 		printk(KERN_ERR "mapcount %d mapcount2 %d page_mapcount %d\n",
1367 		       mapcount, mapcount2, page_mapcount(page));
1368 	BUG_ON(mapcount != mapcount2);
1369 }
1370 
1371 int split_huge_page(struct page *page)
1372 {
1373 	struct anon_vma *anon_vma;
1374 	int ret = 1;
1375 
1376 	BUG_ON(!PageAnon(page));
1377 	anon_vma = page_lock_anon_vma(page);
1378 	if (!anon_vma)
1379 		goto out;
1380 	ret = 0;
1381 	if (!PageCompound(page))
1382 		goto out_unlock;
1383 
1384 	BUG_ON(!PageSwapBacked(page));
1385 	__split_huge_page(page, anon_vma);
1386 
1387 	BUG_ON(PageCompound(page));
1388 out_unlock:
1389 	page_unlock_anon_vma(anon_vma);
1390 out:
1391 	return ret;
1392 }
1393 
1394 int hugepage_madvise(struct vm_area_struct *vma,
1395 		     unsigned long *vm_flags, int advice)
1396 {
1397 	switch (advice) {
1398 	case MADV_HUGEPAGE:
1399 		/*
1400 		 * Be somewhat over-protective like KSM for now!
1401 		 */
1402 		if (*vm_flags & (VM_HUGEPAGE |
1403 				 VM_SHARED   | VM_MAYSHARE   |
1404 				 VM_PFNMAP   | VM_IO      | VM_DONTEXPAND |
1405 				 VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE |
1406 				 VM_MIXEDMAP | VM_SAO))
1407 			return -EINVAL;
1408 		*vm_flags &= ~VM_NOHUGEPAGE;
1409 		*vm_flags |= VM_HUGEPAGE;
1410 		/*
1411 		 * If the vma becomes good for khugepaged to scan,
1412 		 * register it here without waiting for a page fault
1413 		 * that may not happen any time soon.
1414 		 */
1415 		if (unlikely(khugepaged_enter_vma_merge(vma)))
1416 			return -ENOMEM;
1417 		break;
1418 	case MADV_NOHUGEPAGE:
1419 		/*
1420 		 * Be somewhat over-protective like KSM for now!
1421 		 */
1422 		if (*vm_flags & (VM_NOHUGEPAGE |
1423 				 VM_SHARED   | VM_MAYSHARE   |
1424 				 VM_PFNMAP   | VM_IO      | VM_DONTEXPAND |
1425 				 VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE |
1426 				 VM_MIXEDMAP | VM_SAO))
1427 			return -EINVAL;
1428 		*vm_flags &= ~VM_HUGEPAGE;
1429 		*vm_flags |= VM_NOHUGEPAGE;
1430 		/*
1431 		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
1432 		 * this vma even if we leave the mm registered in khugepaged if
1433 		 * it got registered before VM_NOHUGEPAGE was set.
1434 		 */
1435 		break;
1436 	}
1437 
1438 	return 0;
1439 }
1440 
1441 static int __init khugepaged_slab_init(void)
1442 {
1443 	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
1444 					  sizeof(struct mm_slot),
1445 					  __alignof__(struct mm_slot), 0, NULL);
1446 	if (!mm_slot_cache)
1447 		return -ENOMEM;
1448 
1449 	return 0;
1450 }
1451 
1452 static void __init khugepaged_slab_free(void)
1453 {
1454 	kmem_cache_destroy(mm_slot_cache);
1455 	mm_slot_cache = NULL;
1456 }
1457 
1458 static inline struct mm_slot *alloc_mm_slot(void)
1459 {
1460 	if (!mm_slot_cache)	/* initialization failed */
1461 		return NULL;
1462 	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
1463 }
1464 
1465 static inline void free_mm_slot(struct mm_slot *mm_slot)
1466 {
1467 	kmem_cache_free(mm_slot_cache, mm_slot);
1468 }
1469 
1470 static int __init mm_slots_hash_init(void)
1471 {
1472 	mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
1473 				GFP_KERNEL);
1474 	if (!mm_slots_hash)
1475 		return -ENOMEM;
1476 	return 0;
1477 }
1478 
1479 #if 0
1480 static void __init mm_slots_hash_free(void)
1481 {
1482 	kfree(mm_slots_hash);
1483 	mm_slots_hash = NULL;
1484 }
1485 #endif
1486 
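/*
 * The mm -> mm_slot hash below is keyed on the mm_struct pointer:
 * dividing by sizeof(struct mm_struct) scales the pointer by the object
 * size, so mm_structs allocated back to back in a slab spread across
 * different buckets.
 */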
1487 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
1488 {
1489 	struct mm_slot *mm_slot;
1490 	struct hlist_head *bucket;
1491 	struct hlist_node *node;
1492 
1493 	bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
1494 				% MM_SLOTS_HASH_HEADS];
1495 	hlist_for_each_entry(mm_slot, node, bucket, hash) {
1496 		if (mm == mm_slot->mm)
1497 			return mm_slot;
1498 	}
1499 	return NULL;
1500 }
1501 
1502 static void insert_to_mm_slots_hash(struct mm_struct *mm,
1503 				    struct mm_slot *mm_slot)
1504 {
1505 	struct hlist_head *bucket;
1506 
1507 	bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
1508 				% MM_SLOTS_HASH_HEADS];
1509 	mm_slot->mm = mm;
1510 	hlist_add_head(&mm_slot->hash, bucket);
1511 }
1512 
1513 static inline int khugepaged_test_exit(struct mm_struct *mm)
1514 {
1515 	return atomic_read(&mm->mm_users) == 0;
1516 }
1517 
1518 int __khugepaged_enter(struct mm_struct *mm)
1519 {
1520 	struct mm_slot *mm_slot;
1521 	int wakeup;
1522 
1523 	mm_slot = alloc_mm_slot();
1524 	if (!mm_slot)
1525 		return -ENOMEM;
1526 
1527 	/* __khugepaged_exit() must not run from under us */
1528 	VM_BUG_ON(khugepaged_test_exit(mm));
1529 	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
1530 		free_mm_slot(mm_slot);
1531 		return 0;
1532 	}
1533 
1534 	spin_lock(&khugepaged_mm_lock);
1535 	insert_to_mm_slots_hash(mm, mm_slot);
1536 	/*
1537 	 * Insert just behind the scanning cursor, to let the area settle
1538 	 * down a little.
1539 	 */
1540 	wakeup = list_empty(&khugepaged_scan.mm_head);
1541 	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
1542 	spin_unlock(&khugepaged_mm_lock);
1543 
1544 	atomic_inc(&mm->mm_count);
1545 	if (wakeup)
1546 		wake_up_interruptible(&khugepaged_wait);
1547 
1548 	return 0;
1549 }
1550 
1551 int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
1552 {
1553 	unsigned long hstart, hend;
1554 	if (!vma->anon_vma)
1555 		/*
1556 		 * Not yet faulted in so we will register later in the
1557 		 * page fault if needed.
1558 		 */
1559 		return 0;
1560 	if (vma->vm_file || vma->vm_ops)
1561 		/* khugepaged not yet working on file or special mappings */
1562 		return 0;
1563 	VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
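	/*
	 * Round vm_start up and vm_end down to the hugepage boundary:
	 * e.g. with 2M hugepages a vma spanning [0x1ff000, 0x601000)
	 * yields [0x200000, 0x600000), the only range khugepaged could
	 * ever collapse.
	 */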
1564 	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1565 	hend = vma->vm_end & HPAGE_PMD_MASK;
1566 	if (hstart < hend)
1567 		return khugepaged_enter(vma);
1568 	return 0;
1569 }
1570 
1571 void __khugepaged_exit(struct mm_struct *mm)
1572 {
1573 	struct mm_slot *mm_slot;
1574 	int free = 0;
1575 
1576 	spin_lock(&khugepaged_mm_lock);
1577 	mm_slot = get_mm_slot(mm);
1578 	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
1579 		hlist_del(&mm_slot->hash);
1580 		list_del(&mm_slot->mm_node);
1581 		free = 1;
1582 	}
1583 
1584 	if (free) {
1585 		spin_unlock(&khugepaged_mm_lock);
1586 		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1587 		free_mm_slot(mm_slot);
1588 		mmdrop(mm);
1589 	} else if (mm_slot) {
1590 		spin_unlock(&khugepaged_mm_lock);
1591 		/*
1592 		 * This is required to serialize against
1593 		 * khugepaged_test_exit() (which is guaranteed to run
1594 		 * under mmap sem read mode). Stop here (after we
1595 		 * return, all pagetables will be destroyed) until
1596 		 * khugepaged has finished working on the pagetables
1597 		 * under the mmap_sem.
1598 		 */
1599 		down_write(&mm->mmap_sem);
1600 		up_write(&mm->mmap_sem);
1601 	} else
1602 		spin_unlock(&khugepaged_mm_lock);
1603 }
1604 
1605 static void release_pte_page(struct page *page)
1606 {
1607 	/* 0 stands for page_is_file_cache(page) == false */
1608 	dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
1609 	unlock_page(page);
1610 	putback_lru_page(page);
1611 }
1612 
1613 static void release_pte_pages(pte_t *pte, pte_t *_pte)
1614 {
1615 	while (--_pte >= pte) {
1616 		pte_t pteval = *_pte;
1617 		if (!pte_none(pteval))
1618 			release_pte_page(pte_page(pteval));
1619 	}
1620 }
1621 
1622 static void release_all_pte_pages(pte_t *pte)
1623 {
1624 	release_pte_pages(pte, pte + HPAGE_PMD_NR);
1625 }
1626 
1627 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
1628 					unsigned long address,
1629 					pte_t *pte)
1630 {
1631 	struct page *page;
1632 	pte_t *_pte;
1633 	int referenced = 0, isolated = 0, none = 0;
1634 	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
1635 	     _pte++, address += PAGE_SIZE) {
1636 		pte_t pteval = *_pte;
1637 		if (pte_none(pteval)) {
1638 			if (++none <= khugepaged_max_ptes_none)
1639 				continue;
1640 			else {
1641 				release_pte_pages(pte, _pte);
1642 				goto out;
1643 			}
1644 		}
1645 		if (!pte_present(pteval) || !pte_write(pteval)) {
1646 			release_pte_pages(pte, _pte);
1647 			goto out;
1648 		}
1649 		page = vm_normal_page(vma, address, pteval);
1650 		if (unlikely(!page)) {
1651 			release_pte_pages(pte, _pte);
1652 			goto out;
1653 		}
1654 		VM_BUG_ON(PageCompound(page));
1655 		BUG_ON(!PageAnon(page));
1656 		VM_BUG_ON(!PageSwapBacked(page));
1657 
1658 		/* cannot use mapcount: can't collapse if there's a gup pin */
1659 		if (page_count(page) != 1) {
1660 			release_pte_pages(pte, _pte);
1661 			goto out;
1662 		}
1663 		/*
1664 		 * We can do it before isolate_lru_page because the
1665 		 * page can't be freed from under us. NOTE: PG_lock
1666 		 * is needed to serialize against split_huge_page
1667 		 * when invoked from the VM.
1668 		 */
1669 		if (!trylock_page(page)) {
1670 			release_pte_pages(pte, _pte);
1671 			goto out;
1672 		}
1673 		/*
1674 		 * Isolate the page to avoid collapsing a hugepage
1675 		 * currently in use by the VM.
1676 		 */
1677 		if (isolate_lru_page(page)) {
1678 			unlock_page(page);
1679 			release_pte_pages(pte, _pte);
1680 			goto out;
1681 		}
1682 		/* 0 stands for page_is_file_cache(page) == false */
1683 		inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
1684 		VM_BUG_ON(!PageLocked(page));
1685 		VM_BUG_ON(PageLRU(page));
1686 
1687 		/* If no mapped pte is young, don't collapse the page */
1688 		if (pte_young(pteval) || PageReferenced(page) ||
1689 		    mmu_notifier_test_young(vma->vm_mm, address))
1690 			referenced = 1;
1691 	}
1692 	if (unlikely(!referenced))
1693 		release_all_pte_pages(pte);
1694 	else
1695 		isolated = 1;
1696 out:
1697 	return isolated;
1698 }
1699 
1700 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
1701 				      struct vm_area_struct *vma,
1702 				      unsigned long address,
1703 				      spinlock_t *ptl)
1704 {
1705 	pte_t *_pte;
1706 	for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
1707 		pte_t pteval = *_pte;
1708 		struct page *src_page;
1709 
1710 		if (pte_none(pteval)) {
1711 			clear_user_highpage(page, address);
1712 			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
1713 		} else {
1714 			src_page = pte_page(pteval);
1715 			copy_user_highpage(page, src_page, address, vma);
1716 			VM_BUG_ON(page_mapcount(src_page) != 1);
1717 			VM_BUG_ON(page_count(src_page) != 2);
1718 			release_pte_page(src_page);
1719 			/*
1720 			 * ptl mostly unnecessary, but preempt has to
1721 			 * be disabled to update the per-cpu stats
1722 			 * inside page_remove_rmap().
1723 			 */
1724 			spin_lock(ptl);
1725 			/*
1726 			 * paravirt calls inside pte_clear here are
1727 			 * superfluous.
1728 			 */
1729 			pte_clear(vma->vm_mm, address, _pte);
1730 			page_remove_rmap(src_page);
1731 			spin_unlock(ptl);
1732 			free_page_and_swap_cache(src_page);
1733 		}
1734 
1735 		address += PAGE_SIZE;
1736 		page++;
1737 	}
1738 }
1739 
1740 static void collapse_huge_page(struct mm_struct *mm,
1741 			       unsigned long address,
1742 			       struct page **hpage,
1743 			       struct vm_area_struct *vma)
1744 {
1745 	pgd_t *pgd;
1746 	pud_t *pud;
1747 	pmd_t *pmd, _pmd;
1748 	pte_t *pte;
1749 	pgtable_t pgtable;
1750 	struct page *new_page;
1751 	spinlock_t *ptl;
1752 	int isolated;
1753 	unsigned long hstart, hend;
1754 
1755 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1756 #ifndef CONFIG_NUMA
1757 	VM_BUG_ON(!*hpage);
1758 	new_page = *hpage;
1759 #else
1760 	VM_BUG_ON(*hpage);
1761 	/*
1762 	 * Allocate the page while the vma is still valid and under
1763 	 * the mmap_sem read mode so there is no memory allocation
1764 	 * later when we take the mmap_sem in write mode. This is
1765 	 * friendlier behavior (OTOH it may actually hide bugs) towards
1766 	 * userland filesystems whose daemons allocate memory in the
1767 	 * userland I/O paths.  Allocating memory with the mmap_sem
1768 	 * in read mode is also a good idea to allow greater
1769 	 * scalability.
1770 	 */
1771 	new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address);
1772 	if (unlikely(!new_page)) {
1773 		up_read(&mm->mmap_sem);
1774 		*hpage = ERR_PTR(-ENOMEM);
1775 		return;
1776 	}
1777 #endif
1778 	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
1779 		up_read(&mm->mmap_sem);
1780 		put_page(new_page);
1781 		return;
1782 	}
1783 
1784 	/* after allocating the hugepage upgrade to mmap_sem write mode */
1785 	up_read(&mm->mmap_sem);
1786 
1787 	/*
1788 	 * Prevent all access to pagetables with the exception of
1789 	 * gup_fast, later handled by the ptep_clear_flush and the VM
1790 	 * handled by the anon_vma lock + PG_lock.
1791 	 */
1792 	down_write(&mm->mmap_sem);
1793 	if (unlikely(khugepaged_test_exit(mm)))
1794 		goto out;
1795 
1796 	vma = find_vma(mm, address);
1797 	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1798 	hend = vma->vm_end & HPAGE_PMD_MASK;
1799 	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
1800 		goto out;
1801 
1802 	if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
1803 	    (vma->vm_flags & VM_NOHUGEPAGE))
1804 		goto out;
1805 
1806 	/* VM_PFNMAP vmas may have vm_ops null but vm_file set */
1807 	if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
1808 		goto out;
1809 	VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
1810 
1811 	pgd = pgd_offset(mm, address);
1812 	if (!pgd_present(*pgd))
1813 		goto out;
1814 
1815 	pud = pud_offset(pgd, address);
1816 	if (!pud_present(*pud))
1817 		goto out;
1818 
1819 	pmd = pmd_offset(pud, address);
1820 	/* pmd can't go away or become huge under us */
1821 	if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
1822 		goto out;
1823 
1824 	anon_vma_lock(vma->anon_vma);
1825 
1826 	pte = pte_offset_map(pmd, address);
1827 	ptl = pte_lockptr(mm, pmd);
1828 
1829 	spin_lock(&mm->page_table_lock); /* probably unnecessary */
1830 	/*
1831 	 * After this gup_fast can't run anymore. This also removes
1832 	 * any huge TLB entry from the CPU so we won't allow
1833 	 * huge and small TLB entries for the same virtual address
1834 	 * to avoid the risk of CPU bugs in that area.
1835 	 */
1836 	_pmd = pmdp_clear_flush_notify(vma, address, pmd);
1837 	spin_unlock(&mm->page_table_lock);
1838 
1839 	spin_lock(ptl);
1840 	isolated = __collapse_huge_page_isolate(vma, address, pte);
1841 	spin_unlock(ptl);
1842 
1843 	if (unlikely(!isolated)) {
1844 		pte_unmap(pte);
1845 		spin_lock(&mm->page_table_lock);
1846 		BUG_ON(!pmd_none(*pmd));
1847 		set_pmd_at(mm, address, pmd, _pmd);
1848 		spin_unlock(&mm->page_table_lock);
1849 		anon_vma_unlock(vma->anon_vma);
1850 		mem_cgroup_uncharge_page(new_page);
1851 		goto out;
1852 	}
1853 
1854 	/*
1855 	 * All pages are isolated and locked so anon_vma rmap
1856 	 * can't run anymore.
1857 	 */
1858 	anon_vma_unlock(vma->anon_vma);
1859 
1860 	__collapse_huge_page_copy(pte, new_page, vma, address, ptl);
1861 	pte_unmap(pte);
1862 	__SetPageUptodate(new_page);
1863 	pgtable = pmd_pgtable(_pmd);
1864 	VM_BUG_ON(page_count(pgtable) != 1);
1865 	VM_BUG_ON(page_mapcount(pgtable) != 0);
1866 
1867 	_pmd = mk_pmd(new_page, vma->vm_page_prot);
1868 	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1869 	_pmd = pmd_mkhuge(_pmd);
1870 
1871 	/*
1872 	 * spin_lock() below is not the equivalent of smp_wmb(), so
1873 	 * this is needed to prevent the __collapse_huge_page_copy
1874 	 * writes from becoming visible after the set_pmd_at() write.
1875 	 */
1876 	smp_wmb();
1877 
1878 	spin_lock(&mm->page_table_lock);
1879 	BUG_ON(!pmd_none(*pmd));
1880 	page_add_new_anon_rmap(new_page, vma, address);
1881 	set_pmd_at(mm, address, pmd, _pmd);
1882 	update_mmu_cache(vma, address, pmd);
1883 	prepare_pmd_huge_pte(pgtable, mm);
1884 	mm->nr_ptes--;
1885 	spin_unlock(&mm->page_table_lock);
1886 
1887 #ifndef CONFIG_NUMA
1888 	*hpage = NULL;
1889 #endif
1890 	khugepaged_pages_collapsed++;
1891 out_up_write:
1892 	up_write(&mm->mmap_sem);
1893 	return;
1894 
1895 out:
1896 #ifdef CONFIG_NUMA
1897 	put_page(new_page);
1898 #endif
1899 	goto out_up_write;
1900 }
1901 
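/*
 * Scan one pmd-sized range to decide whether collapsing it is
 * worthwhile: every pte must map a present, writable, unpinned anon
 * page on the LRU (up to khugepaged_max_ptes_none empty ptes are
 * tolerated) and at least one pte must look recently referenced.
 * Returns 1 after handing the range to collapse_huge_page(), in which
 * case the mmap_sem has been released; 0 otherwise.
 */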
1902 static int khugepaged_scan_pmd(struct mm_struct *mm,
1903 			       struct vm_area_struct *vma,
1904 			       unsigned long address,
1905 			       struct page **hpage)
1906 {
1907 	pgd_t *pgd;
1908 	pud_t *pud;
1909 	pmd_t *pmd;
1910 	pte_t *pte, *_pte;
1911 	int ret = 0, referenced = 0, none = 0;
1912 	struct page *page;
1913 	unsigned long _address;
1914 	spinlock_t *ptl;
1915 
1916 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1917 
1918 	pgd = pgd_offset(mm, address);
1919 	if (!pgd_present(*pgd))
1920 		goto out;
1921 
1922 	pud = pud_offset(pgd, address);
1923 	if (!pud_present(*pud))
1924 		goto out;
1925 
1926 	pmd = pmd_offset(pud, address);
1927 	if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
1928 		goto out;
1929 
1930 	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1931 	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
1932 	     _pte++, _address += PAGE_SIZE) {
1933 		pte_t pteval = *_pte;
1934 		if (pte_none(pteval)) {
1935 			if (++none <= khugepaged_max_ptes_none)
1936 				continue;
1937 			else
1938 				goto out_unmap;
1939 		}
1940 		if (!pte_present(pteval) || !pte_write(pteval))
1941 			goto out_unmap;
1942 		page = vm_normal_page(vma, _address, pteval);
1943 		if (unlikely(!page))
1944 			goto out_unmap;
1945 		VM_BUG_ON(PageCompound(page));
1946 		if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
1947 			goto out_unmap;
1948 		/* cannot use mapcount: can't collapse if there's a gup pin */
1949 		if (page_count(page) != 1)
1950 			goto out_unmap;
1951 		if (pte_young(pteval) || PageReferenced(page) ||
1952 		    mmu_notifier_test_young(vma->vm_mm, address))
1953 			referenced = 1;
1954 	}
1955 	if (referenced)
1956 		ret = 1;
1957 out_unmap:
1958 	pte_unmap_unlock(pte, ptl);
1959 	if (ret)
1960 		/* collapse_huge_page will return with the mmap_sem released */
1961 		collapse_huge_page(mm, address, hpage, vma);
1962 out:
1963 	return ret;
1964 }
1965 
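/*
 * With khugepaged_mm_lock held: if the mm behind @mm_slot has exited,
 * unhash the slot, unlink it from the scan list and drop the mm
 * reference.
 */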
1966 static void collect_mm_slot(struct mm_slot *mm_slot)
1967 {
1968 	struct mm_struct *mm = mm_slot->mm;
1969 
1970 	VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));
1971 
1972 	if (khugepaged_test_exit(mm)) {
1973 		/* free mm_slot */
1974 		hlist_del(&mm_slot->hash);
1975 		list_del(&mm_slot->mm_node);
1976 
1977 		/*
1978 		 * Not strictly needed because the mm exited already.
1979 		 *
1980 		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1981 		 */
1982 
1983 		/* khugepaged_mm_lock actually not necessary for the below */
1984 		free_mm_slot(mm_slot);
1985 		mmdrop(mm);
1986 	}
1987 }
1988 
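/*
 * Scan up to @pages ptes, resuming from the persistent cursor in
 * khugepaged_scan so that successive passes walk the mm list round
 * robin.  Called and returns with khugepaged_mm_lock held (the lock
 * is dropped while the actual scanning runs); returns the amount of
 * progress made.
 */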
1989 static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
1990 					    struct page **hpage)
1991 {
1992 	struct mm_slot *mm_slot;
1993 	struct mm_struct *mm;
1994 	struct vm_area_struct *vma;
1995 	int progress = 0;
1996 
1997 	VM_BUG_ON(!pages);
1998 	VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));
1999 
2000 	if (khugepaged_scan.mm_slot)
2001 		mm_slot = khugepaged_scan.mm_slot;
2002 	else {
2003 		mm_slot = list_entry(khugepaged_scan.mm_head.next,
2004 				     struct mm_slot, mm_node);
2005 		khugepaged_scan.address = 0;
2006 		khugepaged_scan.mm_slot = mm_slot;
2007 	}
2008 	spin_unlock(&khugepaged_mm_lock);
2009 
2010 	mm = mm_slot->mm;
2011 	down_read(&mm->mmap_sem);
2012 	if (unlikely(khugepaged_test_exit(mm)))
2013 		vma = NULL;
2014 	else
2015 		vma = find_vma(mm, khugepaged_scan.address);
2016 
2017 	progress++;
2018 	for (; vma; vma = vma->vm_next) {
2019 		unsigned long hstart, hend;
2020 
2021 		cond_resched();
2022 		if (unlikely(khugepaged_test_exit(mm))) {
2023 			progress++;
2024 			break;
2025 		}
2026 
2027 		if ((!(vma->vm_flags & VM_HUGEPAGE) &&
2028 		     !khugepaged_always()) ||
2029 		    (vma->vm_flags & VM_NOHUGEPAGE)) {
2030 			progress++;
2031 			continue;
2032 		}
2033 
2034 		/* VM_PFNMAP vmas may have vm_ops null but vm_file set */
2035 		if (!vma->anon_vma || vma->vm_ops || vma->vm_file) {
2036 			khugepaged_scan.address = vma->vm_end;
2037 			progress++;
2038 			continue;
2039 		}
2040 		VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
2041 
2042 		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2043 		hend = vma->vm_end & HPAGE_PMD_MASK;
2044 		if (hstart >= hend) {
2045 			progress++;
2046 			continue;
2047 		}
2048 		if (khugepaged_scan.address < hstart)
2049 			khugepaged_scan.address = hstart;
2050 		if (khugepaged_scan.address > hend) {
2051 			khugepaged_scan.address = hend + HPAGE_PMD_SIZE;
2052 			progress++;
2053 			continue;
2054 		}
2055 		BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2056 
2057 		while (khugepaged_scan.address < hend) {
2058 			int ret;
2059 			cond_resched();
2060 			if (unlikely(khugepaged_test_exit(mm)))
2061 				goto breakouterloop;
2062 
2063 			VM_BUG_ON(khugepaged_scan.address < hstart ||
2064 				  khugepaged_scan.address + HPAGE_PMD_SIZE >
2065 				  hend);
2066 			ret = khugepaged_scan_pmd(mm, vma,
2067 						  khugepaged_scan.address,
2068 						  hpage);
2069 			/* move to next address */
2070 			khugepaged_scan.address += HPAGE_PMD_SIZE;
2071 			progress += HPAGE_PMD_NR;
2072 			if (ret)
2073 				/* collapse released mmap_sem; skip the up_read below */
2074 				goto breakouterloop_mmap_sem;
2075 			if (progress >= pages)
2076 				goto breakouterloop;
2077 		}
2078 	}
2079 breakouterloop:
2080 	up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
2081 breakouterloop_mmap_sem:
2082 
2083 	spin_lock(&khugepaged_mm_lock);
2084 	BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2085 	/*
2086 	 * Release the current mm_slot if this mm is about to die, or
2087 	 * if we scanned all vmas of this mm.
2088 	 */
2089 	if (khugepaged_test_exit(mm) || !vma) {
2090 		/*
2091 		 * Make sure that if mm_users is reaching zero while
2092 		 * khugepaged runs here, khugepaged_exit will find
2093 		 * mm_slot not pointing to the exiting mm.
2094 		 */
2095 		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2096 			khugepaged_scan.mm_slot = list_entry(
2097 				mm_slot->mm_node.next,
2098 				struct mm_slot, mm_node);
2099 			khugepaged_scan.address = 0;
2100 		} else {
2101 			khugepaged_scan.mm_slot = NULL;
2102 			khugepaged_full_scans++;
2103 		}
2104 
2105 		collect_mm_slot(mm_slot);
2106 	}
2107 
2108 	return progress;
2109 }
2110 
2111 static int khugepaged_has_work(void)
2112 {
2113 	return !list_empty(&khugepaged_scan.mm_head) &&
2114 		khugepaged_enabled();
2115 }
2116 
2117 static int khugepaged_wait_event(void)
2118 {
2119 	return !list_empty(&khugepaged_scan.mm_head) ||
2120 		!khugepaged_enabled();
2121 }
2122 
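/*
 * One scan pass: keep calling khugepaged_scan_mm_slot() until roughly
 * khugepaged_pages_to_scan ptes have been considered, the hugepage
 * allocation fails, or the thread is asked to stop or freeze.  The
 * pass_through_head check keeps a single pass from cycling over the
 * whole mm list more than twice.
 */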
2123 static void khugepaged_do_scan(struct page **hpage)
2124 {
2125 	unsigned int progress = 0, pass_through_head = 0;
2126 	unsigned int pages = khugepaged_pages_to_scan;
2127 
2128 	barrier(); /* snapshot khugepaged_pages_to_scan into the local 'pages' */
2129 
2130 	while (progress < pages) {
2131 		cond_resched();
2132 
2133 #ifndef CONFIG_NUMA
2134 		if (!*hpage) {
2135 			*hpage = alloc_hugepage(khugepaged_defrag());
2136 			if (unlikely(!*hpage))
2137 				break;
2138 		}
2139 #else
2140 		if (IS_ERR(*hpage))
2141 			break;
2142 #endif
2143 
2144 		if (unlikely(kthread_should_stop() || freezing(current)))
2145 			break;
2146 
2147 		spin_lock(&khugepaged_mm_lock);
2148 		if (!khugepaged_scan.mm_slot)
2149 			pass_through_head++;
2150 		if (khugepaged_has_work() &&
2151 		    pass_through_head < 2)
2152 			progress += khugepaged_scan_mm_slot(pages - progress,
2153 							    hpage);
2154 		else
2155 			progress = pages;
2156 		spin_unlock(&khugepaged_mm_lock);
2157 	}
2158 }
2159 
2160 static void khugepaged_alloc_sleep(void)
2161 {
2162 	DEFINE_WAIT(wait);
2163 	add_wait_queue(&khugepaged_wait, &wait);
2164 	schedule_timeout_interruptible(
2165 		msecs_to_jiffies(
2166 			khugepaged_alloc_sleep_millisecs));
2167 	remove_wait_queue(&khugepaged_wait, &wait);
2168 }
2169 
2170 #ifndef CONFIG_NUMA
2171 static struct page *khugepaged_alloc_hugepage(void)
2172 {
2173 	struct page *hpage;
2174 
2175 	do {
2176 		hpage = alloc_hugepage(khugepaged_defrag());
2177 		if (!hpage)
2178 			khugepaged_alloc_sleep();
2179 	} while (unlikely(!hpage) &&
2180 		 likely(khugepaged_enabled()));
2181 	return hpage;
2182 }
2183 #endif
2184 
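/*
 * Body of the khugepaged kernel thread: preallocate a hugepage (!NUMA
 * builds only), run one scan pass, then sleep for
 * khugepaged_scan_sleep_millisecs (or until woken up with work) and
 * repeat for as long as transparent hugepage support stays enabled.
 */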
2185 static void khugepaged_loop(void)
2186 {
2187 	struct page *hpage;
2188 
2189 #ifdef CONFIG_NUMA
2190 	hpage = NULL;
2191 #endif
2192 	while (likely(khugepaged_enabled())) {
2193 #ifndef CONFIG_NUMA
2194 		hpage = khugepaged_alloc_hugepage();
2195 		if (unlikely(!hpage))
2196 			break;
2197 #else
2198 		if (IS_ERR(hpage)) {
2199 			khugepaged_alloc_sleep();
2200 			hpage = NULL;
2201 		}
2202 #endif
2203 
2204 		khugepaged_do_scan(&hpage);
2205 #ifndef CONFIG_NUMA
2206 		if (hpage)
2207 			put_page(hpage);
2208 #endif
2209 		try_to_freeze();
2210 		if (unlikely(kthread_should_stop()))
2211 			break;
2212 		if (khugepaged_has_work()) {
2213 			DEFINE_WAIT(wait);
2214 			if (!khugepaged_scan_sleep_millisecs)
2215 				continue;
2216 			add_wait_queue(&khugepaged_wait, &wait);
2217 			schedule_timeout_interruptible(
2218 				msecs_to_jiffies(
2219 					khugepaged_scan_sleep_millisecs));
2220 			remove_wait_queue(&khugepaged_wait, &wait);
2221 		} else if (khugepaged_enabled())
2222 			wait_event_freezable(khugepaged_wait,
2223 					     khugepaged_wait_event());
2224 	}
2225 }
2226 
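/*
 * Kernel thread entry point, serialized against start_khugepaged() by
 * khugepaged_mutex: rerun khugepaged_loop() until khugepaged is
 * disabled or the thread is stopped, then park the scan cursor and
 * clear khugepaged_thread.
 */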
2227 static int khugepaged(void *none)
2228 {
2229 	struct mm_slot *mm_slot;
2230 
2231 	set_freezable();
2232 	set_user_nice(current, 19);
2233 
2234 	/* serialize with start_khugepaged() */
2235 	mutex_lock(&khugepaged_mutex);
2236 
2237 	for (;;) {
2238 		mutex_unlock(&khugepaged_mutex);
2239 		BUG_ON(khugepaged_thread != current);
2240 		khugepaged_loop();
2241 		BUG_ON(khugepaged_thread != current);
2242 
2243 		mutex_lock(&khugepaged_mutex);
2244 		if (!khugepaged_enabled())
2245 			break;
2246 		if (unlikely(kthread_should_stop()))
2247 			break;
2248 	}
2249 
2250 	spin_lock(&khugepaged_mm_lock);
2251 	mm_slot = khugepaged_scan.mm_slot;
2252 	khugepaged_scan.mm_slot = NULL;
2253 	if (mm_slot)
2254 		collect_mm_slot(mm_slot);
2255 	spin_unlock(&khugepaged_mm_lock);
2256 
2257 	khugepaged_thread = NULL;
2258 	mutex_unlock(&khugepaged_mutex);
2259 
2260 	return 0;
2261 }
2262 
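/*
 * Split the huge pmd back into regular ptes.  A reference on the
 * compound page is taken under page_table_lock so the page cannot be
 * freed while split_huge_page() runs without the lock.
 */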
2263 void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
2264 {
2265 	struct page *page;
2266 
2267 	spin_lock(&mm->page_table_lock);
2268 	if (unlikely(!pmd_trans_huge(*pmd))) {
2269 		spin_unlock(&mm->page_table_lock);
2270 		return;
2271 	}
2272 	page = pmd_page(*pmd);
2273 	VM_BUG_ON(!page_count(page));
2274 	get_page(page);
2275 	spin_unlock(&mm->page_table_lock);
2276 
2277 	split_huge_page(page);
2278 
2279 	put_page(page);
2280 	BUG_ON(pmd_trans_huge(*pmd));
2281 }
2282 
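/*
 * Split the huge pmd, if any, mapping @address; the pagetable walk
 * simply returns if nothing is mapped there.
 */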
2283 static void split_huge_page_address(struct mm_struct *mm,
2284 				    unsigned long address)
2285 {
2286 	pgd_t *pgd;
2287 	pud_t *pud;
2288 	pmd_t *pmd;
2289 
2290 	VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
2291 
2292 	pgd = pgd_offset(mm, address);
2293 	if (!pgd_present(*pgd))
2294 		return;
2295 
2296 	pud = pud_offset(pgd, address);
2297 	if (!pud_present(*pud))
2298 		return;
2299 
2300 	pmd = pmd_offset(pud, address);
2301 	if (!pmd_present(*pmd))
2302 		return;
2303 	/*
2304 	 * Caller holds the mmap_sem write mode, so a huge pmd cannot
2305 	 * materialize from under us.
2306 	 */
2307 	split_huge_page_pmd(mm, pmd);
2308 }
2309 
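/*
 * Called when vma boundaries move (e.g. from vma_adjust): a new
 * boundary that falls in the middle of a range mapped by a huge pmd
 * forces that pmd to be split, since a partial range can only be
 * mapped by regular ptes.
 */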
2310 void __vma_adjust_trans_huge(struct vm_area_struct *vma,
2311 			     unsigned long start,
2312 			     unsigned long end,
2313 			     long adjust_next)
2314 {
2315 	/*
2316 	 * If the new start address isn't hpage aligned and it could
2317 	 * previously contain a hugepage: check if we need to split
2318 	 * a huge pmd.
2319 	 */
2320 	if (start & ~HPAGE_PMD_MASK &&
2321 	    (start & HPAGE_PMD_MASK) >= vma->vm_start &&
2322 	    (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
2323 		split_huge_page_address(vma->vm_mm, start);
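	/*
	 * E.g. with 2MB hugepages: moving vm_start from 0x1400000 up to
	 * 0x1434000 leaves the huge pmd at 0x1400000 straddling the new
	 * boundary, so it is split before the vma is adjusted.
	 */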
2324 
2325 	/*
2326 	 * If the new end address isn't hpage aligned and it could
2327 	 * previously contain a hugepage: check if we need to split
2328 	 * a huge pmd.
2329 	 */
2330 	if (end & ~HPAGE_PMD_MASK &&
2331 	    (end & HPAGE_PMD_MASK) >= vma->vm_start &&
2332 	    (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
2333 		split_huge_page_address(vma->vm_mm, end);
2334 
2335 	/*
2336 	 * If we're also updating vma->vm_next->vm_start, and the new
2337 	 * vm_next->vm_start isn't hpage aligned and it could previously
2338 	 * contain a hugepage: check if we need to split a huge pmd.
2339 	 */
2340 	if (adjust_next > 0) {
2341 		struct vm_area_struct *next = vma->vm_next;
2342 		unsigned long nstart = next->vm_start;
2343 		nstart += adjust_next << PAGE_SHIFT;
2344 		if (nstart & ~HPAGE_PMD_MASK &&
2345 		    (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
2346 		    (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
2347 			split_huge_page_address(next->vm_mm, nstart);
2348 	}
2349 }
2350