xref: /linux/mm/hugetlb.c (revision 94e48d6aafef23143f92eadd010c505c49487576)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Generic hugetlb support.
4  * (C) Nadia Yvette Chambers, April 2004
5  */
6 #include <linux/list.h>
7 #include <linux/init.h>
8 #include <linux/mm.h>
9 #include <linux/seq_file.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/nodemask.h>
14 #include <linux/pagemap.h>
15 #include <linux/mempolicy.h>
16 #include <linux/compiler.h>
17 #include <linux/cpuset.h>
18 #include <linux/mutex.h>
19 #include <linux/memblock.h>
20 #include <linux/sysfs.h>
21 #include <linux/slab.h>
22 #include <linux/sched/mm.h>
23 #include <linux/mmdebug.h>
24 #include <linux/sched/signal.h>
25 #include <linux/rmap.h>
26 #include <linux/string_helpers.h>
27 #include <linux/swap.h>
28 #include <linux/swapops.h>
29 #include <linux/jhash.h>
30 #include <linux/numa.h>
31 #include <linux/llist.h>
32 #include <linux/cma.h>
33 
34 #include <asm/page.h>
35 #include <asm/pgalloc.h>
36 #include <asm/tlb.h>
37 
38 #include <linux/io.h>
39 #include <linux/hugetlb.h>
40 #include <linux/hugetlb_cgroup.h>
41 #include <linux/node.h>
42 #include <linux/page_owner.h>
43 #include "internal.h"
44 
45 int hugetlb_max_hstate __read_mostly;
46 unsigned int default_hstate_idx;
47 struct hstate hstates[HUGE_MAX_HSTATE];
48 
49 #ifdef CONFIG_CMA
50 static struct cma *hugetlb_cma[MAX_NUMNODES];
51 #endif
52 static unsigned long hugetlb_cma_size __initdata;
53 
54 /*
55  * Minimum page order among possible hugepage sizes, set to a proper value
56  * at boot time.
57  */
58 static unsigned int minimum_order __read_mostly = UINT_MAX;
59 
60 __initdata LIST_HEAD(huge_boot_pages);
61 
62 /* for command line parsing */
63 static struct hstate * __initdata parsed_hstate;
64 static unsigned long __initdata default_hstate_max_huge_pages;
65 static bool __initdata parsed_valid_hugepagesz = true;
66 static bool __initdata parsed_default_hugepagesz;
67 
68 /*
69  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
70  * free_huge_pages, and surplus_huge_pages.
71  */
72 DEFINE_SPINLOCK(hugetlb_lock);
73 
74 /*
75  * Serializes faults on the same logical page.  This is used to
76  * prevent spurious OOMs when the hugepage pool is fully utilized.
77  */
78 static int num_fault_mutexes;
79 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
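/*
 * For clarity: the table is sized at init time (num_fault_mutexes) and each
 * fault picks its mutex by hashing the faulting mapping and page index (see
 * hugetlb_fault_mutex_hash() later in this file), so faults on different
 * pages rarely serialize on the same mutex.
 */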
80 
81 /* Forward declaration */
82 static int hugetlb_acct_memory(struct hstate *h, long delta);
83 
84 static inline bool subpool_is_free(struct hugepage_subpool *spool)
85 {
86 	if (spool->count)
87 		return false;
88 	if (spool->max_hpages != -1)
89 		return spool->used_hpages == 0;
90 	if (spool->min_hpages != -1)
91 		return spool->rsv_hpages == spool->min_hpages;
92 
93 	return true;
94 }
95 
96 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
97 						unsigned long irq_flags)
98 {
99 	spin_unlock_irqrestore(&spool->lock, irq_flags);
100 
101 	/* If no pages are used, and no other handles to the subpool
102 	 * remain, give up any reservations based on minimum size and
103 	 * free the subpool */
104 	if (subpool_is_free(spool)) {
105 		if (spool->min_hpages != -1)
106 			hugetlb_acct_memory(spool->hstate,
107 						-spool->min_hpages);
108 		kfree(spool);
109 	}
110 }
111 
112 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
113 						long min_hpages)
114 {
115 	struct hugepage_subpool *spool;
116 
117 	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
118 	if (!spool)
119 		return NULL;
120 
121 	spin_lock_init(&spool->lock);
122 	spool->count = 1;
123 	spool->max_hpages = max_hpages;
124 	spool->hstate = h;
125 	spool->min_hpages = min_hpages;
126 
127 	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
128 		kfree(spool);
129 		return NULL;
130 	}
131 	spool->rsv_hpages = min_hpages;
132 
133 	return spool;
134 }
135 
136 void hugepage_put_subpool(struct hugepage_subpool *spool)
137 {
138 	unsigned long flags;
139 
140 	spin_lock_irqsave(&spool->lock, flags);
141 	BUG_ON(!spool->count);
142 	spool->count--;
143 	unlock_or_release_subpool(spool, flags);
144 }
145 
146 /*
147  * Subpool accounting for allocating and reserving pages.
148  * Return -ENOMEM if there are not enough resources to satisfy the
149  * request.  Otherwise, return the number of pages by which the
150  * global pools must be adjusted (upward).  The returned value may
151  * only be different than the passed value (delta) in the case where
152  * a subpool minimum size must be maintained.
153  */
154 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
155 				      long delta)
156 {
157 	long ret = delta;
158 
159 	if (!spool)
160 		return ret;
161 
162 	spin_lock_irq(&spool->lock);
163 
164 	if (spool->max_hpages != -1) {		/* maximum size accounting */
165 		if ((spool->used_hpages + delta) <= spool->max_hpages)
166 			spool->used_hpages += delta;
167 		else {
168 			ret = -ENOMEM;
169 			goto unlock_ret;
170 		}
171 	}
172 
173 	/* minimum size accounting */
174 	if (spool->min_hpages != -1 && spool->rsv_hpages) {
175 		if (delta > spool->rsv_hpages) {
176 			/*
177 			 * Asking for more reserves than those already taken on
178 			 * behalf of subpool.  Return difference.
179 			 */
180 			ret = delta - spool->rsv_hpages;
181 			spool->rsv_hpages = 0;
182 		} else {
183 			ret = 0;	/* reserves already accounted for */
184 			spool->rsv_hpages -= delta;
185 		}
186 	}
187 
188 unlock_ret:
189 	spin_unlock_irq(&spool->lock);
190 	return ret;
191 }
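/*
 * Worked example, for illustration only (assume max_hpages == -1, i.e. no
 * cap): take a subpool created with min_hpages = 4, so rsv_hpages starts at
 * 4 and 4 pages were already charged to the global pool at creation time.
 * A first call with delta = 6 returns 6 - 4 = 2 (only 2 extra pages need
 * global accounting, the other 4 were prepaid by the minimum reservation)
 * and rsv_hpages drops to 0.  A later call with delta = 2 returns the full 2.
 */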
192 
193 /*
194  * Subpool accounting for freeing and unreserving pages.
195  * Return the number of global page reservations that must be dropped.
196  * The return value may only be different than the passed value (delta)
197  * in the case where a subpool minimum size must be maintained.
198  */
199 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
200 				       long delta)
201 {
202 	long ret = delta;
203 	unsigned long flags;
204 
205 	if (!spool)
206 		return delta;
207 
208 	spin_lock_irqsave(&spool->lock, flags);
209 
210 	if (spool->max_hpages != -1)		/* maximum size accounting */
211 		spool->used_hpages -= delta;
212 
213 	 /* minimum size accounting */
214 	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
215 		if (spool->rsv_hpages + delta <= spool->min_hpages)
216 			ret = 0;
217 		else
218 			ret = spool->rsv_hpages + delta - spool->min_hpages;
219 
220 		spool->rsv_hpages += delta;
221 		if (spool->rsv_hpages > spool->min_hpages)
222 			spool->rsv_hpages = spool->min_hpages;
223 	}
224 
225 	/*
226 	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
227 	 * quota reference, free it now.
228 	 */
229 	unlock_or_release_subpool(spool, flags);
230 
231 	return ret;
232 }
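/*
 * Continuing the illustrative example above: once used_hpages is back below
 * min_hpages = 4, freeing pages rebuilds the minimum reservation.  With
 * rsv_hpages = 0, a call with delta = 6 returns 0 + 6 - 4 = 2 global
 * reservations to drop, and rsv_hpages is restored to (and capped at) 4.
 */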
233 
234 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
235 {
236 	return HUGETLBFS_SB(inode->i_sb)->spool;
237 }
238 
239 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
240 {
241 	return subpool_inode(file_inode(vma->vm_file));
242 }
243 
244 /* Helper that removes a struct file_region from the resv_map cache and returns
245  * it for use.
246  */
247 static struct file_region *
248 get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
249 {
250 	struct file_region *nrg = NULL;
251 
252 	VM_BUG_ON(resv->region_cache_count <= 0);
253 
254 	resv->region_cache_count--;
255 	nrg = list_first_entry(&resv->region_cache, struct file_region, link);
256 	list_del(&nrg->link);
257 
258 	nrg->from = from;
259 	nrg->to = to;
260 
261 	return nrg;
262 }
263 
264 static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
265 					      struct file_region *rg)
266 {
267 #ifdef CONFIG_CGROUP_HUGETLB
268 	nrg->reservation_counter = rg->reservation_counter;
269 	nrg->css = rg->css;
270 	if (rg->css)
271 		css_get(rg->css);
272 #endif
273 }
274 
275 /* Helper that records hugetlb_cgroup uncharge info. */
276 static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
277 						struct hstate *h,
278 						struct resv_map *resv,
279 						struct file_region *nrg)
280 {
281 #ifdef CONFIG_CGROUP_HUGETLB
282 	if (h_cg) {
283 		nrg->reservation_counter =
284 			&h_cg->rsvd_hugepage[hstate_index(h)];
285 		nrg->css = &h_cg->css;
286 		/*
287 		 * The caller will hold exactly one h_cg->css reference for
288 		 * the whole contiguous reservation region. But this area
289 		 * might be scattered when some file_regions already reside
290 		 * in it. As a result, many file_regions may share only one
291 		 * css reference. In order to ensure that each file_region
292 		 * holds exactly one h_cg->css reference, do a css_get() for
293 		 * each file_region and leave the reference held by the
294 		 * caller untouched.
295 		 */
296 		css_get(&h_cg->css);
297 		if (!resv->pages_per_hpage)
298 			resv->pages_per_hpage = pages_per_huge_page(h);
299 		/* pages_per_hpage should be the same for all entries in
300 		 * a resv_map.
301 		 */
302 		VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
303 	} else {
304 		nrg->reservation_counter = NULL;
305 		nrg->css = NULL;
306 	}
307 #endif
308 }
309 
310 static void put_uncharge_info(struct file_region *rg)
311 {
312 #ifdef CONFIG_CGROUP_HUGETLB
313 	if (rg->css)
314 		css_put(rg->css);
315 #endif
316 }
317 
318 static bool has_same_uncharge_info(struct file_region *rg,
319 				   struct file_region *org)
320 {
321 #ifdef CONFIG_CGROUP_HUGETLB
322 	return rg && org &&
323 	       rg->reservation_counter == org->reservation_counter &&
324 	       rg->css == org->css;
325 
326 #else
327 	return true;
328 #endif
329 }
330 
331 static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
332 {
333 	struct file_region *nrg = NULL, *prg = NULL;
334 
335 	prg = list_prev_entry(rg, link);
336 	if (&prg->link != &resv->regions && prg->to == rg->from &&
337 	    has_same_uncharge_info(prg, rg)) {
338 		prg->to = rg->to;
339 
340 		list_del(&rg->link);
341 		put_uncharge_info(rg);
342 		kfree(rg);
343 
344 		rg = prg;
345 	}
346 
347 	nrg = list_next_entry(rg, link);
348 	if (&nrg->link != &resv->regions && nrg->from == rg->to &&
349 	    has_same_uncharge_info(nrg, rg)) {
350 		nrg->from = rg->from;
351 
352 		list_del(&rg->link);
353 		put_uncharge_info(rg);
354 		kfree(rg);
355 	}
356 }
357 
358 static inline long
359 hugetlb_resv_map_add(struct resv_map *map, struct file_region *rg, long from,
360 		     long to, struct hstate *h, struct hugetlb_cgroup *cg,
361 		     long *regions_needed)
362 {
363 	struct file_region *nrg;
364 
365 	if (!regions_needed) {
366 		nrg = get_file_region_entry_from_cache(map, from, to);
367 		record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
368 		list_add(&nrg->link, rg->link.prev);
369 		coalesce_file_region(map, nrg);
370 	} else
371 		*regions_needed += 1;
372 
373 	return to - from;
374 }
375 
376 /*
377  * Must be called with resv->lock held.
378  *
379  * Calling this with regions_needed != NULL will count the number of pages
380  * to be added but will not modify the linked list. In that case,
381  * regions_needed will be set to the number of file_regions needed in the
382  * cache to carry out the add for this range.
383  */
384 static long add_reservation_in_range(struct resv_map *resv, long f, long t,
385 				     struct hugetlb_cgroup *h_cg,
386 				     struct hstate *h, long *regions_needed)
387 {
388 	long add = 0;
389 	struct list_head *head = &resv->regions;
390 	long last_accounted_offset = f;
391 	struct file_region *rg = NULL, *trg = NULL;
392 
393 	if (regions_needed)
394 		*regions_needed = 0;
395 
396 	/* In this loop, we essentially handle an entry for the range
397 	 * [last_accounted_offset, rg->from), at every iteration, with some
398 	 * bounds checking.
399 	 */
400 	list_for_each_entry_safe(rg, trg, head, link) {
401 		/* Skip irrelevant regions that start before our range. */
402 		if (rg->from < f) {
403 			/* If this region ends after the last accounted offset,
404 			 * then we need to update last_accounted_offset.
405 			 */
406 			if (rg->to > last_accounted_offset)
407 				last_accounted_offset = rg->to;
408 			continue;
409 		}
410 
411 		/* When we find a region that starts beyond our range, we've
412 		 * finished.
413 		 */
414 		if (rg->from >= t)
415 			break;
416 
417 		/* Add an entry for last_accounted_offset -> rg->from, and
418 		 * update last_accounted_offset.
419 		 */
420 		if (rg->from > last_accounted_offset)
421 			add += hugetlb_resv_map_add(resv, rg,
422 						    last_accounted_offset,
423 						    rg->from, h, h_cg,
424 						    regions_needed);
425 
426 		last_accounted_offset = rg->to;
427 	}
428 
429 	/* Handle the case where our range extends beyond
430 	 * last_accounted_offset.
431 	 */
432 	if (last_accounted_offset < t)
433 		add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
434 					    t, h, h_cg, regions_needed);
435 
436 	VM_BUG_ON(add < 0);
437 	return add;
438 }
439 
440 /* Must be called with resv->lock acquired. Will drop lock to allocate entries.
441  */
442 static int allocate_file_region_entries(struct resv_map *resv,
443 					int regions_needed)
444 	__must_hold(&resv->lock)
445 {
446 	struct list_head allocated_regions;
447 	int to_allocate = 0, i = 0;
448 	struct file_region *trg = NULL, *rg = NULL;
449 
450 	VM_BUG_ON(regions_needed < 0);
451 
452 	INIT_LIST_HEAD(&allocated_regions);
453 
454 	/*
455 	 * Check for sufficient descriptors in the cache to accommodate
456 	 * the number of in progress add operations plus regions_needed.
457 	 *
458 	 * This is a while loop because when we drop the lock, some other call
459 	 * to region_add or region_del may have consumed some region_entries,
460 	 * so we keep looping here until we finally have enough entries for
461 	 * (adds_in_progress + regions_needed).
462 	 */
463 	while (resv->region_cache_count <
464 	       (resv->adds_in_progress + regions_needed)) {
465 		to_allocate = resv->adds_in_progress + regions_needed -
466 			      resv->region_cache_count;
467 
468 		/* At this point, we should have enough entries in the cache
469 		 * for all the existing adds_in_progress. We should only be
470 		 * needing to allocate for regions_needed.
471 		 */
472 		VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);
473 
474 		spin_unlock(&resv->lock);
475 		for (i = 0; i < to_allocate; i++) {
476 			trg = kmalloc(sizeof(*trg), GFP_KERNEL);
477 			if (!trg)
478 				goto out_of_memory;
479 			list_add(&trg->link, &allocated_regions);
480 		}
481 
482 		spin_lock(&resv->lock);
483 
484 		list_splice(&allocated_regions, &resv->region_cache);
485 		resv->region_cache_count += to_allocate;
486 	}
487 
488 	return 0;
489 
490 out_of_memory:
491 	list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
492 		list_del(&rg->link);
493 		kfree(rg);
494 	}
495 	return -ENOMEM;
496 }
497 
498 /*
499  * Add the huge page range represented by [f, t) to the reserve
500  * map.  Regions will be taken from the cache to fill in this range.
501  * Sufficient regions should exist in the cache due to the previous
502  * call to region_chg with the same range, but in some cases the cache will not
503  * have sufficient entries due to races with other code doing region_add or
504  * region_del.  The extra needed entries will be allocated.
505  *
506  * regions_needed is the out value provided by a previous call to region_chg.
507  *
508  * Return the number of new huge pages added to the map.  This number is greater
509  * than or equal to zero.  If file_region entries needed to be allocated for
510  * this operation and we were not able to allocate, it returns -ENOMEM.
511  * region_add of regions of length 1 never allocates file_regions and cannot
512  * fail; region_chg will always allocate at least 1 entry and a region_add for
513  * 1 page will only require at most 1 entry.
514  */
515 static long region_add(struct resv_map *resv, long f, long t,
516 		       long in_regions_needed, struct hstate *h,
517 		       struct hugetlb_cgroup *h_cg)
518 {
519 	long add = 0, actual_regions_needed = 0;
520 
521 	spin_lock(&resv->lock);
522 retry:
523 
524 	/* Count how many regions are actually needed to execute this add. */
525 	add_reservation_in_range(resv, f, t, NULL, NULL,
526 				 &actual_regions_needed);
527 
528 	/*
529 	 * Check for sufficient descriptors in the cache to accommodate
530 	 * this add operation. Note that actual_regions_needed may be greater
531 	 * than in_regions_needed, as the resv_map may have been modified since
532 	 * the region_chg call. In this case, we need to make sure that we
533 	 * allocate extra entries, such that we have enough for all the
534 	 * existing adds_in_progress, plus the excess needed for this
535 	 * operation.
536 	 */
537 	if (actual_regions_needed > in_regions_needed &&
538 	    resv->region_cache_count <
539 		    resv->adds_in_progress +
540 			    (actual_regions_needed - in_regions_needed)) {
541 		/* region_add operation of range 1 should never need to
542 		 * allocate file_region entries.
543 		 */
544 		VM_BUG_ON(t - f <= 1);
545 
546 		if (allocate_file_region_entries(
547 			    resv, actual_regions_needed - in_regions_needed)) {
548 			return -ENOMEM;
549 		}
550 
551 		goto retry;
552 	}
553 
554 	add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);
555 
556 	resv->adds_in_progress -= in_regions_needed;
557 
558 	spin_unlock(&resv->lock);
559 	return add;
560 }
561 
562 /*
563  * Examine the existing reserve map and determine how many
564  * huge pages in the specified range [f, t) are NOT currently
565  * represented.  This routine is called before a subsequent
566  * call to region_add that will actually modify the reserve
567  * map to add the specified range [f, t).  region_chg does
568  * not change the number of huge pages represented by the
569  * map.  A number of new file_region structures are added to the cache as
570  * placeholders for the subsequent region_add call to use. At least 1
571  * file_region structure is added.
572  *
573  * out_regions_needed is the number of regions added to the
574  * resv->adds_in_progress.  This value needs to be provided to a follow up call
575  * to region_add or region_abort for proper accounting.
576  *
577  * Returns the number of huge pages that need to be added to the existing
578  * reservation map for the range [f, t).  This number is greater than or
579  * equal to zero.  -ENOMEM is returned if a new file_region structure or
580  * cache entry is needed and cannot be allocated.
581  */
582 static long region_chg(struct resv_map *resv, long f, long t,
583 		       long *out_regions_needed)
584 {
585 	long chg = 0;
586 
587 	spin_lock(&resv->lock);
588 
589 	/* Count how many hugepages in this range are NOT represented. */
590 	chg = add_reservation_in_range(resv, f, t, NULL, NULL,
591 				       out_regions_needed);
592 
593 	if (*out_regions_needed == 0)
594 		*out_regions_needed = 1;
595 
596 	if (allocate_file_region_entries(resv, *out_regions_needed))
597 		return -ENOMEM;
598 
599 	resv->adds_in_progress += *out_regions_needed;
600 
601 	spin_unlock(&resv->lock);
602 	return chg;
603 }
604 
605 /*
606  * Abort the in progress add operation.  The adds_in_progress field
607  * of the resv_map keeps track of the operations in progress between
608  * calls to region_chg and region_add.  Operations are sometimes
609  * aborted after the call to region_chg.  In such cases, region_abort
610  * is called to decrement the adds_in_progress counter. regions_needed
611  * is the value returned by the region_chg call; it is used to decrement
612  * the adds_in_progress counter.
613  *
614  * NOTE: The range arguments [f, t) are not needed or used in this
615  * routine.  They are kept to make reading the calling code easier as
616  * arguments will match the associated region_chg call.
617  */
618 static void region_abort(struct resv_map *resv, long f, long t,
619 			 long regions_needed)
620 {
621 	spin_lock(&resv->lock);
622 	VM_BUG_ON(!resv->region_cache_count);
623 	resv->adds_in_progress -= regions_needed;
624 	spin_unlock(&resv->lock);
625 }
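/*
 * Typical calling sequence for the three helpers above, sketched here for
 * clarity (the real callers are hugetlb_reserve_pages() and the
 * __vma_reservation_common() paths later in this file):
 *
 *	chg = region_chg(resv, f, t, &regions_needed);
 *	if (chg < 0)
 *		goto fail;			// could not populate the cache
 *	// ... charge the subpool/cgroup for 'chg' pages ...
 *	if (charging failed)
 *		region_abort(resv, f, t, regions_needed);
 *	else
 *		region_add(resv, f, t, regions_needed, h, h_cg);
 *
 * region_chg counts the uncovered pages and pre-populates the entry cache,
 * region_add commits the range using those cached entries, and region_abort
 * merely drops the adds_in_progress accounting taken by region_chg.
 */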
626 
627 /*
628  * Delete the specified range [f, t) from the reserve map.  If the
629  * t parameter is LONG_MAX, this indicates that ALL regions after f
630  * should be deleted.  Locate the regions which intersect [f, t)
631  * and either trim, delete or split the existing regions.
632  *
633  * Returns the number of huge pages deleted from the reserve map.
634  * In the normal case, the return value is zero or more.  In the
635  * case where a region must be split, a new region descriptor must
636  * be allocated.  If the allocation fails, -ENOMEM will be returned.
637  * NOTE: If the parameter t == LONG_MAX, then we will never split
638  * a region and possibly return -ENOMEM.  Callers specifying
639  * t == LONG_MAX do not need to check for -ENOMEM error.
640  */
641 static long region_del(struct resv_map *resv, long f, long t)
642 {
643 	struct list_head *head = &resv->regions;
644 	struct file_region *rg, *trg;
645 	struct file_region *nrg = NULL;
646 	long del = 0;
647 
648 retry:
649 	spin_lock(&resv->lock);
650 	list_for_each_entry_safe(rg, trg, head, link) {
651 		/*
652 		 * Skip regions before the range to be deleted.  file_region
653 		 * ranges are normally of the form [from, to).  However, there
654 		 * may be a "placeholder" entry in the map which is of the form
655 		 * (from, to) with from == to.  Check for placeholder entries
656 		 * at the beginning of the range to be deleted.
657 		 */
658 		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
659 			continue;
660 
661 		if (rg->from >= t)
662 			break;
663 
664 		if (f > rg->from && t < rg->to) { /* Must split region */
665 			/*
666 			 * Check for an entry in the cache before dropping
667 			 * lock and attempting allocation.
668 			 */
669 			if (!nrg &&
670 			    resv->region_cache_count > resv->adds_in_progress) {
671 				nrg = list_first_entry(&resv->region_cache,
672 							struct file_region,
673 							link);
674 				list_del(&nrg->link);
675 				resv->region_cache_count--;
676 			}
677 
678 			if (!nrg) {
679 				spin_unlock(&resv->lock);
680 				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
681 				if (!nrg)
682 					return -ENOMEM;
683 				goto retry;
684 			}
685 
686 			del += t - f;
687 			hugetlb_cgroup_uncharge_file_region(
688 				resv, rg, t - f, false);
689 
690 			/* New entry for end of split region */
691 			nrg->from = t;
692 			nrg->to = rg->to;
693 
694 			copy_hugetlb_cgroup_uncharge_info(nrg, rg);
695 
696 			INIT_LIST_HEAD(&nrg->link);
697 
698 			/* Original entry is trimmed */
699 			rg->to = f;
700 
701 			list_add(&nrg->link, &rg->link);
702 			nrg = NULL;
703 			break;
704 		}
705 
706 		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
707 			del += rg->to - rg->from;
708 			hugetlb_cgroup_uncharge_file_region(resv, rg,
709 							    rg->to - rg->from, true);
710 			list_del(&rg->link);
711 			kfree(rg);
712 			continue;
713 		}
714 
715 		if (f <= rg->from) {	/* Trim beginning of region */
716 			hugetlb_cgroup_uncharge_file_region(resv, rg,
717 							    t - rg->from, false);
718 
719 			del += t - rg->from;
720 			rg->from = t;
721 		} else {		/* Trim end of region */
722 			hugetlb_cgroup_uncharge_file_region(resv, rg,
723 							    rg->to - f, false);
724 
725 			del += rg->to - f;
726 			rg->to = f;
727 		}
728 	}
729 
730 	spin_unlock(&resv->lock);
731 	kfree(nrg);
732 	return del;
733 }
734 
735 /*
736  * A rare out-of-memory error was encountered which prevented removal of
737  * the reserve map region for a page.  The huge page itself was freed
738  * and removed from the page cache.  This routine will adjust the subpool
739  * usage count, and the global reserve count if needed.  By incrementing
740  * these counts, the reserve map entry which could not be deleted will
741  * appear as a "reserved" entry instead of simply dangling with incorrect
742  * counts.
743  */
744 void hugetlb_fix_reserve_counts(struct inode *inode)
745 {
746 	struct hugepage_subpool *spool = subpool_inode(inode);
747 	long rsv_adjust;
748 	bool reserved = false;
749 
750 	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
751 	if (rsv_adjust > 0) {
752 		struct hstate *h = hstate_inode(inode);
753 
754 		if (!hugetlb_acct_memory(h, 1))
755 			reserved = true;
756 	} else if (!rsv_adjust) {
757 		reserved = true;
758 	}
759 
760 	if (!reserved)
761 		pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
762 }
763 
764 /*
765  * Count and return the number of huge pages in the reserve map
766  * that intersect with the range [f, t).
767  */
768 static long region_count(struct resv_map *resv, long f, long t)
769 {
770 	struct list_head *head = &resv->regions;
771 	struct file_region *rg;
772 	long chg = 0;
773 
774 	spin_lock(&resv->lock);
775 	/* Locate each segment we overlap with, and count that overlap. */
776 	list_for_each_entry(rg, head, link) {
777 		long seg_from;
778 		long seg_to;
779 
780 		if (rg->to <= f)
781 			continue;
782 		if (rg->from >= t)
783 			break;
784 
785 		seg_from = max(rg->from, f);
786 		seg_to = min(rg->to, t);
787 
788 		chg += seg_to - seg_from;
789 	}
790 	spin_unlock(&resv->lock);
791 
792 	return chg;
793 }
794 
795 /*
796  * Convert the address within this vma to the page offset within
797  * the mapping, in pagecache page units; huge pages here.
798  */
799 static pgoff_t vma_hugecache_offset(struct hstate *h,
800 			struct vm_area_struct *vma, unsigned long address)
801 {
802 	return ((address - vma->vm_start) >> huge_page_shift(h)) +
803 			(vma->vm_pgoff >> huge_page_order(h));
804 }
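/*
 * For example, assuming a 2MB hstate (huge_page_shift == 21) and a mapping
 * with vm_pgoff == 0: an address 4MB past vm_start yields
 * (4MB >> 21) + 0 == 2, i.e. the third huge-page-sized unit of the file.
 */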
805 
806 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
807 				     unsigned long address)
808 {
809 	return vma_hugecache_offset(hstate_vma(vma), vma, address);
810 }
811 EXPORT_SYMBOL_GPL(linear_hugepage_index);
812 
813 /*
814  * Return the size of the pages allocated when backing a VMA. In the majority
815  * of cases this will be the same size as used by the page table entries.
816  */
817 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
818 {
819 	if (vma->vm_ops && vma->vm_ops->pagesize)
820 		return vma->vm_ops->pagesize(vma);
821 	return PAGE_SIZE;
822 }
823 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
824 
825 /*
826  * Return the page size being used by the MMU to back a VMA. In the majority
827  * of cases, the page size used by the kernel matches the MMU size. On
828  * architectures where it differs, an architecture-specific 'strong'
829  * version of this symbol is required.
830  */
831 __weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
832 {
833 	return vma_kernel_pagesize(vma);
834 }
835 
836 /*
837  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
838  * bits of the reservation map pointer, which are always clear due to
839  * alignment.
840  */
841 #define HPAGE_RESV_OWNER    (1UL << 0)
842 #define HPAGE_RESV_UNMAPPED (1UL << 1)
843 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
844 
845 /*
846  * These helpers are used to track how many pages are reserved for
847  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
848  * is guaranteed to have its future faults succeed.
849  *
850  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
851  * the reserve counters are updated with the hugetlb_lock held. It is safe
852  * to reset the VMA at fork() time as it is not in use yet and there is no
853  * chance of the global counters getting corrupted as a result of the values.
854  *
855  * The private mapping reservation is represented in a subtly different
856  * manner to a shared mapping.  A shared mapping has a region map associated
857  * with the underlying file; this region map represents the backing file
858  * pages which have ever had a reservation assigned, and it persists even
859  * after the page is instantiated.  A private mapping has a region map
860  * associated with the original mmap which is attached to all VMAs that
861  * reference it; this region map represents those offsets which have consumed
862  * a reservation, i.e. where pages have been instantiated.
863  */
864 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
865 {
866 	return (unsigned long)vma->vm_private_data;
867 }
868 
869 static void set_vma_private_data(struct vm_area_struct *vma,
870 							unsigned long value)
871 {
872 	vma->vm_private_data = (void *)value;
873 }
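/*
 * Note on the encoding used by the helpers below: for a MAP_PRIVATE hugetlb
 * VMA, vm_private_data packs a resv_map pointer together with the
 * HPAGE_RESV_* flags defined above.  A resv_map comes from kmalloc() and is
 * therefore at least word aligned, so its two low bits are always zero and
 * are free to carry the flags: set_vma_resv_map() preserves the flag bits
 * while installing the pointer, and vma_resv_map() masks with
 * ~HPAGE_RESV_MASK to recover it.
 */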
874 
875 static void
876 resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
877 					  struct hugetlb_cgroup *h_cg,
878 					  struct hstate *h)
879 {
880 #ifdef CONFIG_CGROUP_HUGETLB
881 	if (!h_cg || !h) {
882 		resv_map->reservation_counter = NULL;
883 		resv_map->pages_per_hpage = 0;
884 		resv_map->css = NULL;
885 	} else {
886 		resv_map->reservation_counter =
887 			&h_cg->rsvd_hugepage[hstate_index(h)];
888 		resv_map->pages_per_hpage = pages_per_huge_page(h);
889 		resv_map->css = &h_cg->css;
890 	}
891 #endif
892 }
893 
894 struct resv_map *resv_map_alloc(void)
895 {
896 	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
897 	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
898 
899 	if (!resv_map || !rg) {
900 		kfree(resv_map);
901 		kfree(rg);
902 		return NULL;
903 	}
904 
905 	kref_init(&resv_map->refs);
906 	spin_lock_init(&resv_map->lock);
907 	INIT_LIST_HEAD(&resv_map->regions);
908 
909 	resv_map->adds_in_progress = 0;
910 	/*
911 	 * Initialize these to 0. On shared mappings, 0's here indicate these
912 	 * fields don't do cgroup accounting. On private mappings, these will be
913 	 * re-initialized to the proper values, to indicate that hugetlb cgroup
914 	 * reservations are to be un-charged from here.
915 	 */
916 	resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);
917 
918 	INIT_LIST_HEAD(&resv_map->region_cache);
919 	list_add(&rg->link, &resv_map->region_cache);
920 	resv_map->region_cache_count = 1;
921 
922 	return resv_map;
923 }
924 
925 void resv_map_release(struct kref *ref)
926 {
927 	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
928 	struct list_head *head = &resv_map->region_cache;
929 	struct file_region *rg, *trg;
930 
931 	/* Clear out any active regions before we release the map. */
932 	region_del(resv_map, 0, LONG_MAX);
933 
934 	/* ... and any entries left in the cache */
935 	list_for_each_entry_safe(rg, trg, head, link) {
936 		list_del(&rg->link);
937 		kfree(rg);
938 	}
939 
940 	VM_BUG_ON(resv_map->adds_in_progress);
941 
942 	kfree(resv_map);
943 }
944 
945 static inline struct resv_map *inode_resv_map(struct inode *inode)
946 {
947 	/*
948 	 * At inode evict time, i_mapping may not point to the original
949 	 * address space within the inode.  This original address space
950 	 * contains the pointer to the resv_map.  So, always use the
951 	 * address space embedded within the inode.
952 	 * The VERY common case is inode->mapping == &inode->i_data, but
953 	 * this may not be true for device special inodes.
954 	 */
955 	return (struct resv_map *)(&inode->i_data)->private_data;
956 }
957 
958 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
959 {
960 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
961 	if (vma->vm_flags & VM_MAYSHARE) {
962 		struct address_space *mapping = vma->vm_file->f_mapping;
963 		struct inode *inode = mapping->host;
964 
965 		return inode_resv_map(inode);
966 
967 	} else {
968 		return (struct resv_map *)(get_vma_private_data(vma) &
969 							~HPAGE_RESV_MASK);
970 	}
971 }
972 
973 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
974 {
975 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
976 	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
977 
978 	set_vma_private_data(vma, (get_vma_private_data(vma) &
979 				HPAGE_RESV_MASK) | (unsigned long)map);
980 }
981 
982 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
983 {
984 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
985 	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
986 
987 	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
988 }
989 
990 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
991 {
992 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
993 
994 	return (get_vma_private_data(vma) & flag) != 0;
995 }
996 
997 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
998 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
999 {
1000 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1001 	if (!(vma->vm_flags & VM_MAYSHARE))
1002 		vma->vm_private_data = (void *)0;
1003 }
1004 
1005 /* Returns true if the VMA has associated reserve pages */
1006 static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
1007 {
1008 	if (vma->vm_flags & VM_NORESERVE) {
1009 		/*
1010 		 * This address is already reserved by another process
1011 		 * (chg == 0), so we should decrement the reserved count.
1012 		 * Without decrementing, the reserve count would remain after
1013 		 * the inode is released, because this allocated page will go
1014 		 * into the page cache and be regarded as coming from the
1015 		 * reserved pool in the releasing step.  Currently we have no
1016 		 * better solution to deal with this properly, so add a work-around here.
1017 		 */
1018 		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
1019 			return true;
1020 		else
1021 			return false;
1022 	}
1023 
1024 	/* Shared mappings always use reserves */
1025 	if (vma->vm_flags & VM_MAYSHARE) {
1026 		/*
1027 		 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
1028 		 * be a region map for all pages.  The only situation where
1029 		 * there is no region map is if a hole was punched via
1030 		 * fallocate.  In this case, there really are no reserves to
1031 		 * use.  This situation is indicated if chg != 0.
1032 		 */
1033 		if (chg)
1034 			return false;
1035 		else
1036 			return true;
1037 	}
1038 
1039 	/*
1040 	 * Only the process that called mmap() has reserves for
1041 	 * private mappings.
1042 	 */
1043 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1044 		/*
1045 		 * Like the shared case above, a hole punch or truncate
1046 		 * could have been performed on the private mapping.
1047 		 * Examine the value of chg to determine if reserves
1048 		 * actually exist or were previously consumed.
1049 		 * Very Subtle - The value of chg comes from a previous
1050 		 * call to vma_needs_reserves().  The reserve map for
1051 		 * private mappings has different (opposite) semantics
1052 		 * than that of shared mappings.  vma_needs_reserves()
1053 		 * has already taken this difference in semantics into
1054 		 * account.  Therefore, the meaning of chg is the same
1055 		 * as in the shared case above.  Code could easily be
1056 		 * combined, but keeping it separate draws attention to
1057 		 * subtle differences.
1058 		 */
1059 		if (chg)
1060 			return false;
1061 		else
1062 			return true;
1063 	}
1064 
1065 	return false;
1066 }
1067 
1068 static void enqueue_huge_page(struct hstate *h, struct page *page)
1069 {
1070 	int nid = page_to_nid(page);
1071 
1072 	lockdep_assert_held(&hugetlb_lock);
1073 	list_move(&page->lru, &h->hugepage_freelists[nid]);
1074 	h->free_huge_pages++;
1075 	h->free_huge_pages_node[nid]++;
1076 	SetHPageFreed(page);
1077 }
1078 
1079 static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
1080 {
1081 	struct page *page;
1082 	bool pin = !!(current->flags & PF_MEMALLOC_PIN);
1083 
1084 	lockdep_assert_held(&hugetlb_lock);
1085 	list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
1086 		if (pin && !is_pinnable_page(page))
1087 			continue;
1088 
1089 		if (PageHWPoison(page))
1090 			continue;
1091 
1092 		list_move(&page->lru, &h->hugepage_activelist);
1093 		set_page_refcounted(page);
1094 		ClearHPageFreed(page);
1095 		h->free_huge_pages--;
1096 		h->free_huge_pages_node[nid]--;
1097 		return page;
1098 	}
1099 
1100 	return NULL;
1101 }
1102 
1103 static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
1104 		nodemask_t *nmask)
1105 {
1106 	unsigned int cpuset_mems_cookie;
1107 	struct zonelist *zonelist;
1108 	struct zone *zone;
1109 	struct zoneref *z;
1110 	int node = NUMA_NO_NODE;
1111 
1112 	zonelist = node_zonelist(nid, gfp_mask);
1113 
1114 retry_cpuset:
1115 	cpuset_mems_cookie = read_mems_allowed_begin();
1116 	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
1117 		struct page *page;
1118 
1119 		if (!cpuset_zone_allowed(zone, gfp_mask))
1120 			continue;
1121 		/*
1122 		 * No need to ask again on the same node. The pool is node
1123 		 * rather than zone aware.
1124 		 */
1125 		if (zone_to_nid(zone) == node)
1126 			continue;
1127 		node = zone_to_nid(zone);
1128 
1129 		page = dequeue_huge_page_node_exact(h, node);
1130 		if (page)
1131 			return page;
1132 	}
1133 	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
1134 		goto retry_cpuset;
1135 
1136 	return NULL;
1137 }
1138 
1139 static struct page *dequeue_huge_page_vma(struct hstate *h,
1140 				struct vm_area_struct *vma,
1141 				unsigned long address, int avoid_reserve,
1142 				long chg)
1143 {
1144 	struct page *page;
1145 	struct mempolicy *mpol;
1146 	gfp_t gfp_mask;
1147 	nodemask_t *nodemask;
1148 	int nid;
1149 
1150 	/*
1151 	 * A child process with MAP_PRIVATE mappings created by its parent
1152 	 * has no page reserves. This check ensures that reservations are
1153 	 * not "stolen". The child may still get SIGKILLed.
1154 	 */
1155 	if (!vma_has_reserves(vma, chg) &&
1156 			h->free_huge_pages - h->resv_huge_pages == 0)
1157 		goto err;
1158 
1159 	/* If reserves cannot be used, ensure enough pages are in the pool */
1160 	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
1161 		goto err;
1162 
1163 	gfp_mask = htlb_alloc_mask(h);
1164 	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1165 	page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
1166 	if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
1167 		SetHPageRestoreReserve(page);
1168 		h->resv_huge_pages--;
1169 	}
1170 
1171 	mpol_cond_put(mpol);
1172 	return page;
1173 
1174 err:
1175 	return NULL;
1176 }
1177 
1178 /*
1179  * Common helper functions for hstate_next_node_to_{alloc|free}.
1180  * We may have allocated or freed a huge page based on a different
1181  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
1182  * be outside of *nodes_allowed.  Ensure that we use an allowed
1183  * node for alloc or free.
1184  */
1185 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
1186 {
1187 	nid = next_node_in(nid, *nodes_allowed);
1188 	VM_BUG_ON(nid >= MAX_NUMNODES);
1189 
1190 	return nid;
1191 }
1192 
1193 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
1194 {
1195 	if (!node_isset(nid, *nodes_allowed))
1196 		nid = next_node_allowed(nid, nodes_allowed);
1197 	return nid;
1198 }
1199 
1200 /*
1201  * Returns the previously saved node ["this node"] from which to
1202  * allocate a persistent huge page for the pool and advances the
1203  * next node from which to allocate, handling wrap at the end of
1204  * the node mask.
1205  */
1206 static int hstate_next_node_to_alloc(struct hstate *h,
1207 					nodemask_t *nodes_allowed)
1208 {
1209 	int nid;
1210 
1211 	VM_BUG_ON(!nodes_allowed);
1212 
1213 	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
1214 	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
1215 
1216 	return nid;
1217 }
1218 
1219 /*
1220  * helper for remove_pool_huge_page() - return the previously saved
1221  * node ["this node"] from which to free a huge page.  Advance the
1222  * next node id whether or not we find a free huge page to free so
1223  * that the next attempt to free addresses the next node.
1224  */
1225 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
1226 {
1227 	int nid;
1228 
1229 	VM_BUG_ON(!nodes_allowed);
1230 
1231 	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
1232 	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
1233 
1234 	return nid;
1235 }
1236 
1237 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
1238 	for (nr_nodes = nodes_weight(*mask);				\
1239 		nr_nodes > 0 &&						\
1240 		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
1241 		nr_nodes--)
1242 
1243 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
1244 	for (nr_nodes = nodes_weight(*mask);				\
1245 		nr_nodes > 0 &&						\
1246 		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
1247 		nr_nodes--)
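/*
 * Usage sketch for the iterators above (this mirrors how
 * alloc_pool_huge_page() and remove_pool_huge_page() below use them):
 *
 *	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
 *		page = alloc_fresh_huge_page(h, gfp_mask, node, ...);
 *		if (page)
 *			break;
 *	}
 *
 * Each iteration hands back one allowed node and advances
 * h->next_nid_to_alloc, so repeated pool adjustments spread huge pages
 * round-robin over the allowed nodes rather than filling one node first.
 */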
1248 
1249 #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
1250 static void destroy_compound_gigantic_page(struct page *page,
1251 					unsigned int order)
1252 {
1253 	int i;
1254 	int nr_pages = 1 << order;
1255 	struct page *p = page + 1;
1256 
1257 	atomic_set(compound_mapcount_ptr(page), 0);
1258 	atomic_set(compound_pincount_ptr(page), 0);
1259 
1260 	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1261 		clear_compound_head(p);
1262 		set_page_refcounted(p);
1263 	}
1264 
1265 	set_compound_order(page, 0);
1266 	page[1].compound_nr = 0;
1267 	__ClearPageHead(page);
1268 }
1269 
1270 static void free_gigantic_page(struct page *page, unsigned int order)
1271 {
1272 	/*
1273 	 * If the page isn't allocated using the cma allocator,
1274 	 * cma_release() returns false.
1275 	 */
1276 #ifdef CONFIG_CMA
1277 	if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
1278 		return;
1279 #endif
1280 
1281 	free_contig_range(page_to_pfn(page), 1 << order);
1282 }
1283 
1284 #ifdef CONFIG_CONTIG_ALLOC
1285 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1286 		int nid, nodemask_t *nodemask)
1287 {
1288 	unsigned long nr_pages = pages_per_huge_page(h);
1289 	if (nid == NUMA_NO_NODE)
1290 		nid = numa_mem_id();
1291 
1292 #ifdef CONFIG_CMA
1293 	{
1294 		struct page *page;
1295 		int node;
1296 
1297 		if (hugetlb_cma[nid]) {
1298 			page = cma_alloc(hugetlb_cma[nid], nr_pages,
1299 					huge_page_order(h), true);
1300 			if (page)
1301 				return page;
1302 		}
1303 
1304 		if (!(gfp_mask & __GFP_THISNODE)) {
1305 			for_each_node_mask(node, *nodemask) {
1306 				if (node == nid || !hugetlb_cma[node])
1307 					continue;
1308 
1309 				page = cma_alloc(hugetlb_cma[node], nr_pages,
1310 						huge_page_order(h), true);
1311 				if (page)
1312 					return page;
1313 			}
1314 		}
1315 	}
1316 #endif
1317 
1318 	return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
1319 }
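/*
 * Allocation order above: the CMA area reserved for the requested node is
 * tried first, then (unless __GFP_THISNODE) the CMA areas of the other
 * allowed nodes, and finally alloc_contig_pages() as the generic fallback.
 */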
1320 
1321 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
1322 static void prep_compound_gigantic_page(struct page *page, unsigned int order);
1323 #else /* !CONFIG_CONTIG_ALLOC */
1324 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1325 					int nid, nodemask_t *nodemask)
1326 {
1327 	return NULL;
1328 }
1329 #endif /* CONFIG_CONTIG_ALLOC */
1330 
1331 #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
1332 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1333 					int nid, nodemask_t *nodemask)
1334 {
1335 	return NULL;
1336 }
1337 static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1338 static inline void destroy_compound_gigantic_page(struct page *page,
1339 						unsigned int order) { }
1340 #endif
1341 
1342 /*
1343  * Remove hugetlb page from lists, and update dtor so that page appears
1344  * as just a compound page.  A reference is held on the page.
1345  *
1346  * Must be called with hugetlb lock held.
1347  */
1348 static void remove_hugetlb_page(struct hstate *h, struct page *page,
1349 							bool adjust_surplus)
1350 {
1351 	int nid = page_to_nid(page);
1352 
1353 	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
1354 	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page_rsvd(page), page);
1355 
1356 	lockdep_assert_held(&hugetlb_lock);
1357 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
1358 		return;
1359 
1360 	list_del(&page->lru);
1361 
1362 	if (HPageFreed(page)) {
1363 		h->free_huge_pages--;
1364 		h->free_huge_pages_node[nid]--;
1365 	}
1366 	if (adjust_surplus) {
1367 		h->surplus_huge_pages--;
1368 		h->surplus_huge_pages_node[nid]--;
1369 	}
1370 
1371 	set_page_refcounted(page);
1372 	set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
1373 
1374 	h->nr_huge_pages--;
1375 	h->nr_huge_pages_node[nid]--;
1376 }
1377 
1378 static void update_and_free_page(struct hstate *h, struct page *page)
1379 {
1380 	int i;
1381 	struct page *subpage = page;
1382 
1383 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
1384 		return;
1385 
1386 	for (i = 0; i < pages_per_huge_page(h);
1387 	     i++, subpage = mem_map_next(subpage, page, i)) {
1388 		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
1389 				1 << PG_referenced | 1 << PG_dirty |
1390 				1 << PG_active | 1 << PG_private |
1391 				1 << PG_writeback);
1392 	}
1393 	if (hstate_is_gigantic(h)) {
1394 		destroy_compound_gigantic_page(page, huge_page_order(h));
1395 		free_gigantic_page(page, huge_page_order(h));
1396 	} else {
1397 		__free_pages(page, huge_page_order(h));
1398 	}
1399 }
1400 
1401 static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list)
1402 {
1403 	struct page *page, *t_page;
1404 
1405 	list_for_each_entry_safe(page, t_page, list, lru) {
1406 		update_and_free_page(h, page);
1407 		cond_resched();
1408 	}
1409 }
1410 
1411 struct hstate *size_to_hstate(unsigned long size)
1412 {
1413 	struct hstate *h;
1414 
1415 	for_each_hstate(h) {
1416 		if (huge_page_size(h) == size)
1417 			return h;
1418 	}
1419 	return NULL;
1420 }
1421 
1422 void free_huge_page(struct page *page)
1423 {
1424 	/*
1425 	 * Can't pass hstate in here because it is called from the
1426 	 * compound page destructor.
1427 	 */
1428 	struct hstate *h = page_hstate(page);
1429 	int nid = page_to_nid(page);
1430 	struct hugepage_subpool *spool = hugetlb_page_subpool(page);
1431 	bool restore_reserve;
1432 	unsigned long flags;
1433 
1434 	VM_BUG_ON_PAGE(page_count(page), page);
1435 	VM_BUG_ON_PAGE(page_mapcount(page), page);
1436 
1437 	hugetlb_set_page_subpool(page, NULL);
1438 	page->mapping = NULL;
1439 	restore_reserve = HPageRestoreReserve(page);
1440 	ClearHPageRestoreReserve(page);
1441 
1442 	/*
1443 	 * If HPageRestoreReserve was set on page, page allocation consumed a
1444 	 * reservation.  If the page was associated with a subpool, there
1445 	 * would have been a page reserved in the subpool before allocation
1446 	 * via hugepage_subpool_get_pages().  Since we are 'restoring' the
1447 	 * reservation, do not call hugepage_subpool_put_pages() as this will
1448 	 * remove the reserved page from the subpool.
1449 	 */
1450 	if (!restore_reserve) {
1451 		/*
1452 		 * A return code of zero implies that the subpool will be
1453 		 * under its minimum size if the reservation is not restored
1454 		 * after the page is freed.  Therefore, force the
1455 		 * restore_reserve operation.
1456 		 */
1457 		if (hugepage_subpool_put_pages(spool, 1) == 0)
1458 			restore_reserve = true;
1459 	}
1460 
1461 	spin_lock_irqsave(&hugetlb_lock, flags);
1462 	ClearHPageMigratable(page);
1463 	hugetlb_cgroup_uncharge_page(hstate_index(h),
1464 				     pages_per_huge_page(h), page);
1465 	hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
1466 					  pages_per_huge_page(h), page);
1467 	if (restore_reserve)
1468 		h->resv_huge_pages++;
1469 
1470 	if (HPageTemporary(page)) {
1471 		remove_hugetlb_page(h, page, false);
1472 		spin_unlock_irqrestore(&hugetlb_lock, flags);
1473 		update_and_free_page(h, page);
1474 	} else if (h->surplus_huge_pages_node[nid]) {
1475 		/* remove the page from active list */
1476 		remove_hugetlb_page(h, page, true);
1477 		spin_unlock_irqrestore(&hugetlb_lock, flags);
1478 		update_and_free_page(h, page);
1479 	} else {
1480 		arch_clear_hugepage_flags(page);
1481 		enqueue_huge_page(h, page);
1482 		spin_unlock_irqrestore(&hugetlb_lock, flags);
1483 	}
1484 }
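/*
 * Summary of the three paths above: HPageTemporary pages and surplus pages
 * are torn down immediately via update_and_free_page(), while regular pool
 * pages are returned to the per-node free list by enqueue_huge_page().
 */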
1485 
1486 /*
1487  * Must be called with the hugetlb lock held
1488  */
1489 static void __prep_account_new_huge_page(struct hstate *h, int nid)
1490 {
1491 	lockdep_assert_held(&hugetlb_lock);
1492 	h->nr_huge_pages++;
1493 	h->nr_huge_pages_node[nid]++;
1494 }
1495 
1496 static void __prep_new_huge_page(struct page *page)
1497 {
1498 	INIT_LIST_HEAD(&page->lru);
1499 	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1500 	hugetlb_set_page_subpool(page, NULL);
1501 	set_hugetlb_cgroup(page, NULL);
1502 	set_hugetlb_cgroup_rsvd(page, NULL);
1503 }
1504 
1505 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1506 {
1507 	__prep_new_huge_page(page);
1508 	spin_lock_irq(&hugetlb_lock);
1509 	__prep_account_new_huge_page(h, nid);
1510 	spin_unlock_irq(&hugetlb_lock);
1511 }
1512 
1513 static void prep_compound_gigantic_page(struct page *page, unsigned int order)
1514 {
1515 	int i;
1516 	int nr_pages = 1 << order;
1517 	struct page *p = page + 1;
1518 
1519 	/* we rely on prep_new_huge_page to set the destructor */
1520 	set_compound_order(page, order);
1521 	__ClearPageReserved(page);
1522 	__SetPageHead(page);
1523 	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1524 		/*
1525 		 * For gigantic hugepages allocated through bootmem at
1526 		 * boot, it's safer to be consistent with the not-gigantic
1527 		 * hugepages and clear the PG_reserved bit from all tail pages
1528 		 * too.  Otherwise drivers using get_user_pages() to access tail
1529 		 * pages may get the reference counting wrong if they see
1530 		 * PG_reserved set on a tail page (despite the head page not
1531 		 * having PG_reserved set).  Enforcing this consistency between
1532 		 * head and tail pages allows drivers to optimize away a check
1533 		 * on the head page when they need to know if put_page() is needed
1534 		 * after get_user_pages().
1535 		 */
1536 		__ClearPageReserved(p);
1537 		set_page_count(p, 0);
1538 		set_compound_head(p, page);
1539 	}
1540 	atomic_set(compound_mapcount_ptr(page), -1);
1541 	atomic_set(compound_pincount_ptr(page), 0);
1542 }
1543 
1544 /*
1545  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
1546  * transparent huge pages.  See the PageTransHuge() documentation for more
1547  * details.
1548  */
1549 int PageHuge(struct page *page)
1550 {
1551 	if (!PageCompound(page))
1552 		return 0;
1553 
1554 	page = compound_head(page);
1555 	return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
1556 }
1557 EXPORT_SYMBOL_GPL(PageHuge);
1558 
1559 /*
1560  * PageHeadHuge() only returns true for a hugetlbfs head page, but not for
1561  * normal or transparent huge pages.
1562  */
1563 int PageHeadHuge(struct page *page_head)
1564 {
1565 	if (!PageHead(page_head))
1566 		return 0;
1567 
1568 	return page_head[1].compound_dtor == HUGETLB_PAGE_DTOR;
1569 }
1570 
1571 /*
1572  * Find and lock address space (mapping) in write mode.
1573  *
1574  * Upon entry, the page is locked which means that page_mapping() is
1575  * stable.  Due to locking order, we can only trylock_write.  If we can
1576  * not get the lock, simply return NULL to caller.
1577  */
1578 struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
1579 {
1580 	struct address_space *mapping = page_mapping(hpage);
1581 
1582 	if (!mapping)
1583 		return mapping;
1584 
1585 	if (i_mmap_trylock_write(mapping))
1586 		return mapping;
1587 
1588 	return NULL;
1589 }
1590 
1591 pgoff_t hugetlb_basepage_index(struct page *page)
1592 {
1593 	struct page *page_head = compound_head(page);
1594 	pgoff_t index = page_index(page_head);
1595 	unsigned long compound_idx;
1596 
1597 	if (compound_order(page_head) >= MAX_ORDER)
1598 		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1599 	else
1600 		compound_idx = page - page_head;
1601 
1602 	return (index << compound_order(page_head)) + compound_idx;
1603 }
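/*
 * For example, assuming 4KB base pages and a 2MB huge page (compound order
 * 9) whose head sits at huge-page index 3 in the file, the base page 5
 * pages into the compound page gets index (3 << 9) + 5 == 1541, measured in
 * base pages.
 */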
1604 
1605 static struct page *alloc_buddy_huge_page(struct hstate *h,
1606 		gfp_t gfp_mask, int nid, nodemask_t *nmask,
1607 		nodemask_t *node_alloc_noretry)
1608 {
1609 	int order = huge_page_order(h);
1610 	struct page *page;
1611 	bool alloc_try_hard = true;
1612 
1613 	/*
1614 	 * By default we always try hard to allocate the page with the
1615 	 * __GFP_RETRY_MAYFAIL flag.  However, if we are allocating pages in
1616 	 * a loop (to adjust global huge page counts) and the previous
1617 	 * allocation failed, do not continue to try hard on the same node.
1618 	 * Use the node_alloc_noretry bitmap to manage this state information.
1619 	 */
1620 	if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
1621 		alloc_try_hard = false;
1622 	gfp_mask |= __GFP_COMP|__GFP_NOWARN;
1623 	if (alloc_try_hard)
1624 		gfp_mask |= __GFP_RETRY_MAYFAIL;
1625 	if (nid == NUMA_NO_NODE)
1626 		nid = numa_mem_id();
1627 	page = __alloc_pages(gfp_mask, order, nid, nmask);
1628 	if (page)
1629 		__count_vm_event(HTLB_BUDDY_PGALLOC);
1630 	else
1631 		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1632 
1633 	/*
1634 	 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page,
1635 	 * this indicates an overall state change.  Clear the bit so that we
1636 	 * resume normal 'try hard' allocations.
1637 	 */
1638 	if (node_alloc_noretry && page && !alloc_try_hard)
1639 		node_clear(nid, *node_alloc_noretry);
1640 
1641 	/*
1642 	 * If we tried hard to get a page but failed, set the bit so that
1643 	 * subsequent attempts will not try as hard until there is an
1644 	 * overall state change.
1645 	 */
1646 	if (node_alloc_noretry && !page && alloc_try_hard)
1647 		node_set(nid, *node_alloc_noretry);
1648 
1649 	return page;
1650 }
1651 
1652 /*
1653  * Common helper to allocate a fresh hugetlb page. All specific allocators
1654  * should use this function to get new hugetlb pages.
1655  */
1656 static struct page *alloc_fresh_huge_page(struct hstate *h,
1657 		gfp_t gfp_mask, int nid, nodemask_t *nmask,
1658 		nodemask_t *node_alloc_noretry)
1659 {
1660 	struct page *page;
1661 
1662 	if (hstate_is_gigantic(h))
1663 		page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
1664 	else
1665 		page = alloc_buddy_huge_page(h, gfp_mask,
1666 				nid, nmask, node_alloc_noretry);
1667 	if (!page)
1668 		return NULL;
1669 
1670 	if (hstate_is_gigantic(h))
1671 		prep_compound_gigantic_page(page, huge_page_order(h));
1672 	prep_new_huge_page(h, page, page_to_nid(page));
1673 
1674 	return page;
1675 }
1676 
1677 /*
1678  * Allocates a fresh page to the hugetlb allocator pool in a node-interleaved
1679  * manner.
1680  */
1681 static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1682 				nodemask_t *node_alloc_noretry)
1683 {
1684 	struct page *page;
1685 	int nr_nodes, node;
1686 	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
1687 
1688 	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1689 		page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed,
1690 						node_alloc_noretry);
1691 		if (page)
1692 			break;
1693 	}
1694 
1695 	if (!page)
1696 		return 0;
1697 
1698 	put_page(page); /* free it into the hugepage allocator */
1699 
1700 	return 1;
1701 }
1702 
1703 /*
1704  * Remove huge page from pool from next node to free.  Attempt to keep
1705  * persistent huge pages more or less balanced over allowed nodes.
1706  * This routine only 'removes' the hugetlb page.  The caller must make
1707  * an additional call to free the page to low level allocators.
1708  * Called with hugetlb_lock locked.
1709  */
1710 static struct page *remove_pool_huge_page(struct hstate *h,
1711 						nodemask_t *nodes_allowed,
1712 						 bool acct_surplus)
1713 {
1714 	int nr_nodes, node;
1715 	struct page *page = NULL;
1716 
1717 	lockdep_assert_held(&hugetlb_lock);
1718 	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1719 		/*
1720 		 * If we're returning unused surplus pages, only examine
1721 		 * nodes with surplus pages.
1722 		 */
1723 		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1724 		    !list_empty(&h->hugepage_freelists[node])) {
1725 			page = list_entry(h->hugepage_freelists[node].next,
1726 					  struct page, lru);
1727 			remove_hugetlb_page(h, page, acct_surplus);
1728 			break;
1729 		}
1730 	}
1731 
1732 	return page;
1733 }
1734 
1735 /*
1736  * Dissolve a given free hugepage into free buddy pages. This function does
1737  * nothing for in-use hugepages and non-hugepages.
1738  * This function returns the following values:
1739  *
1740  *  -EBUSY: failed to dissolve the free hugepage, or the hugepage is in use
1741  *          (allocated or reserved).
1742  *       0: successfully dissolved the free hugepage, or the page is not a
1743  *          hugepage (considered as already dissolved)
1744  */
1745 int dissolve_free_huge_page(struct page *page)
1746 {
1747 	int rc = -EBUSY;
1748 
1749 retry:
1750 	/* Not to disrupt normal path by vainly holding hugetlb_lock */
1751 	if (!PageHuge(page))
1752 		return 0;
1753 
1754 	spin_lock_irq(&hugetlb_lock);
1755 	if (!PageHuge(page)) {
1756 		rc = 0;
1757 		goto out;
1758 	}
1759 
1760 	if (!page_count(page)) {
1761 		struct page *head = compound_head(page);
1762 		struct hstate *h = page_hstate(head);
1763 		if (h->free_huge_pages - h->resv_huge_pages == 0)
1764 			goto out;
1765 
1766 		/*
1767 		 * We should make sure that the page is already on the free list
1768 		 * when it is dissolved.
1769 		 */
1770 		if (unlikely(!HPageFreed(head))) {
1771 			spin_unlock_irq(&hugetlb_lock);
1772 			cond_resched();
1773 
1774 			/*
1775 			 * Theoretically, we should return -EBUSY when we
1776 			 * encounter this race.  In practice, the race window
1777 			 * is quite small, so retrying gives us a good chance
1778 			 * of successfully dissolving the page.  Seizing this
1779 			 * opportunity is an optimization that increases the
1780 			 * success rate of dissolving the page.
1781 			 */
1782 			goto retry;
1783 		}
1784 
1785 		/*
1786 		 * Move the PageHWPoison flag from the head page to the raw error page,
1787 		 * so that all subpages other than the error page remain reusable.
1788 		 */
1789 		if (PageHWPoison(head) && page != head) {
1790 			SetPageHWPoison(page);
1791 			ClearPageHWPoison(head);
1792 		}
1793 		remove_hugetlb_page(h, head, false);
1794 		h->max_huge_pages--;
1795 		spin_unlock_irq(&hugetlb_lock);
1796 		update_and_free_page(h, head);
1797 		return 0;
1798 	}
1799 out:
1800 	spin_unlock_irq(&hugetlb_lock);
1801 	return rc;
1802 }
1803 
1804 /*
1805  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1806  * make specified memory blocks removable from the system.
1807  * Note that this will dissolve a free gigantic hugepage completely, if any
1808  * part of it lies within the given range.
1809  * Also note that if dissolve_free_huge_page() returns with an error, all
1810  * free hugepages that were dissolved before that error are lost.
1811  */
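/*
 * Illustrative: if the smallest supported huge page size is 2MB on a system
 * with 4KB base pages (minimum_order == 9), the loop below steps through the
 * range 512 pfns at a time, probing one candidate page per minimum-sized
 * huge page.
 */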
1812 int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1813 {
1814 	unsigned long pfn;
1815 	struct page *page;
1816 	int rc = 0;
1817 
1818 	if (!hugepages_supported())
1819 		return rc;
1820 
1821 	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
1822 		page = pfn_to_page(pfn);
1823 		rc = dissolve_free_huge_page(page);
1824 		if (rc)
1825 			break;
1826 	}
1827 
1828 	return rc;
1829 }
1830 
1831 /*
1832  * Allocates a fresh surplus page from the page allocator.
1833  */
1834 static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
1835 		int nid, nodemask_t *nmask)
1836 {
1837 	struct page *page = NULL;
1838 
1839 	if (hstate_is_gigantic(h))
1840 		return NULL;
1841 
1842 	spin_lock_irq(&hugetlb_lock);
1843 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
1844 		goto out_unlock;
1845 	spin_unlock_irq(&hugetlb_lock);
1846 
1847 	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
1848 	if (!page)
1849 		return NULL;
1850 
1851 	spin_lock_irq(&hugetlb_lock);
1852 	/*
1853 	 * We could have raced with the pool size change.
1854 	 * Double check that and simply deallocate the new page
1855 	 * if we would end up overcommitting the surpluses.  Abuse
1856 	 * the temporary page flag to work around the awkward
1857 	 * free_huge_page code flow.
1858 	 */
1859 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1860 		SetHPageTemporary(page);
1861 		spin_unlock_irq(&hugetlb_lock);
1862 		put_page(page);
1863 		return NULL;
1864 	} else {
1865 		h->surplus_huge_pages++;
1866 		h->surplus_huge_pages_node[page_to_nid(page)]++;
1867 	}
1868 
1869 out_unlock:
1870 	spin_unlock_irq(&hugetlb_lock);
1871 
1872 	return page;
1873 }
1874 
1875 static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
1876 				     int nid, nodemask_t *nmask)
1877 {
1878 	struct page *page;
1879 
1880 	if (hstate_is_gigantic(h))
1881 		return NULL;
1882 
1883 	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
1884 	if (!page)
1885 		return NULL;
1886 
1887 	/*
1888 	 * We do not account these pages as surplus because they are only
1889 	 * temporary and will be released properly on the last reference.
1890 	 */
1891 	SetHPageTemporary(page);
1892 
1893 	return page;
1894 }
1895 
1896 /*
1897  * Use the VMA's mpolicy to allocate a huge page from the buddy.
1898  */
1899 static
1900 struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
1901 		struct vm_area_struct *vma, unsigned long addr)
1902 {
1903 	struct page *page;
1904 	struct mempolicy *mpol;
1905 	gfp_t gfp_mask = htlb_alloc_mask(h);
1906 	int nid;
1907 	nodemask_t *nodemask;
1908 
1909 	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
1910 	page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
1911 	mpol_cond_put(mpol);
1912 
1913 	return page;
1914 }
1915 
1916 /* page migration callback function */
1917 struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
1918 		nodemask_t *nmask, gfp_t gfp_mask)
1919 {
1920 	spin_lock_irq(&hugetlb_lock);
1921 	if (h->free_huge_pages - h->resv_huge_pages > 0) {
1922 		struct page *page;
1923 
1924 		page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
1925 		if (page) {
1926 			spin_unlock_irq(&hugetlb_lock);
1927 			return page;
1928 		}
1929 	}
1930 	spin_unlock_irq(&hugetlb_lock);
1931 
1932 	return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
1933 }
1934 
1935 /* mempolicy aware migration callback */
1936 struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
1937 		unsigned long address)
1938 {
1939 	struct mempolicy *mpol;
1940 	nodemask_t *nodemask;
1941 	struct page *page;
1942 	gfp_t gfp_mask;
1943 	int node;
1944 
1945 	gfp_mask = htlb_alloc_mask(h);
1946 	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1947 	page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask);
1948 	mpol_cond_put(mpol);
1949 
1950 	return page;
1951 }
1952 
1953 /*
1954  * Increase the hugetlb pool such that it can accommodate a reservation
1955  * of size 'delta'.
1956  */
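/*
 * Illustrative arithmetic: with delta == 4, resv_huge_pages == 10 and
 * free_huge_pages == 12, needed == (10 + 4) - 12 == 2 surplus pages must be
 * allocated before the reservation can be committed; any extras allocated
 * while racing with other changes are handed back to the buddy allocator.
 */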
1957 static int gather_surplus_pages(struct hstate *h, long delta)
1958 	__must_hold(&hugetlb_lock)
1959 {
1960 	struct list_head surplus_list;
1961 	struct page *page, *tmp;
1962 	int ret;
1963 	long i;
1964 	long needed, allocated;
1965 	bool alloc_ok = true;
1966 
1967 	lockdep_assert_held(&hugetlb_lock);
1968 	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1969 	if (needed <= 0) {
1970 		h->resv_huge_pages += delta;
1971 		return 0;
1972 	}
1973 
1974 	allocated = 0;
1975 	INIT_LIST_HEAD(&surplus_list);
1976 
1977 	ret = -ENOMEM;
1978 retry:
1979 	spin_unlock_irq(&hugetlb_lock);
1980 	for (i = 0; i < needed; i++) {
1981 		page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
1982 				NUMA_NO_NODE, NULL);
1983 		if (!page) {
1984 			alloc_ok = false;
1985 			break;
1986 		}
1987 		list_add(&page->lru, &surplus_list);
1988 		cond_resched();
1989 	}
1990 	allocated += i;
1991 
1992 	/*
1993 	 * After retaking hugetlb_lock, we need to recalculate 'needed'
1994 	 * because either resv_huge_pages or free_huge_pages may have changed.
1995 	 */
1996 	spin_lock_irq(&hugetlb_lock);
1997 	needed = (h->resv_huge_pages + delta) -
1998 			(h->free_huge_pages + allocated);
1999 	if (needed > 0) {
2000 		if (alloc_ok)
2001 			goto retry;
2002 		/*
2003 		 * We were not able to allocate enough pages to
2004 		 * satisfy the entire reservation so we free what
2005 		 * we've allocated so far.
2006 		 */
2007 		goto free;
2008 	}
2009 	/*
2010 	 * The surplus_list now contains _at_least_ the number of extra pages
2011 	 * needed to accommodate the reservation.  Add the appropriate number
2012 	 * of pages to the hugetlb pool and free the extras back to the buddy
2013 	 * allocator.  Commit the entire reservation here to prevent another
2014 	 * process from stealing the pages as they are added to the pool but
2015 	 * before they are reserved.
2016 	 */
2017 	needed += allocated;
2018 	h->resv_huge_pages += delta;
2019 	ret = 0;
2020 
2021 	/* Free the needed pages to the hugetlb pool */
2022 	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
2023 		int zeroed;
2024 
2025 		if ((--needed) < 0)
2026 			break;
2027 		/*
2028 		 * This page is now managed by the hugetlb allocator and has
2029 		 * no users -- drop the buddy allocator's reference.
2030 		 */
2031 		zeroed = put_page_testzero(page);
2032 		VM_BUG_ON_PAGE(!zeroed, page);
2033 		enqueue_huge_page(h, page);
2034 	}
2035 free:
2036 	spin_unlock_irq(&hugetlb_lock);
2037 
2038 	/* Free unnecessary surplus pages to the buddy allocator */
2039 	list_for_each_entry_safe(page, tmp, &surplus_list, lru)
2040 		put_page(page);
2041 	spin_lock_irq(&hugetlb_lock);
2042 
2043 	return ret;
2044 }
2045 
2046 /*
2047  * This routine has two main purposes:
2048  * 1) Decrement the reservation count (resv_huge_pages) by the value passed
2049  *    in unused_resv_pages.  This corresponds to the prior adjustments made
2050  *    to the associated reservation map.
2051  * 2) Free any unused surplus pages that may have been allocated to satisfy
2052  *    the reservation.  As many as unused_resv_pages may be freed.
2053  */
2054 static void return_unused_surplus_pages(struct hstate *h,
2055 					unsigned long unused_resv_pages)
2056 {
2057 	unsigned long nr_pages;
2058 	struct page *page;
2059 	LIST_HEAD(page_list);
2060 
2061 	lockdep_assert_held(&hugetlb_lock);
2062 	/* Uncommit the reservation */
2063 	h->resv_huge_pages -= unused_resv_pages;
2064 
2065 	/* Cannot return gigantic pages currently */
2066 	if (hstate_is_gigantic(h))
2067 		goto out;
2068 
2069 	/*
2070 	 * Part (or even all) of the reservation could have been backed
2071 	 * by pre-allocated pages. Only free surplus pages.
2072 	 */
2073 	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
2074 
2075 	/*
2076 	 * We want to release as many surplus pages as possible, spread
2077 	 * evenly across all nodes with memory. Iterate across these nodes
2078 	 * until we can no longer free unreserved surplus pages. This occurs
2079 	 * when the nodes with surplus pages have no free pages.
2080 	 * remove_pool_huge_page() will balance the freed pages across the
2081 	 * on-line nodes with memory and will handle the hstate accounting.
2082 	 */
2083 	while (nr_pages--) {
2084 		page = remove_pool_huge_page(h, &node_states[N_MEMORY], 1);
2085 		if (!page)
2086 			goto out;
2087 
2088 		list_add(&page->lru, &page_list);
2089 	}
2090 
2091 out:
2092 	spin_unlock_irq(&hugetlb_lock);
2093 	update_and_free_pages_bulk(h, &page_list);
2094 	spin_lock_irq(&hugetlb_lock);
2095 }
2096 
2097 
2098 /*
2099  * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
2100  * are used by the huge page allocation routines to manage reservations.
2101  *
2102  * vma_needs_reservation is called to determine if the huge page at addr
2103  * within the vma has an associated reservation.  If a reservation is
2104  * needed, the value 1 is returned.  The caller is then responsible for
2105  * managing the global reservation and subpool usage counts.  After
2106  * the huge page has been allocated, vma_commit_reservation is called
2107  * to add the page to the reservation map.  If the page allocation fails,
2108  * the reservation must be ended instead of committed.  vma_end_reservation
2109  * is called in such cases.
2110  *
2111  * In the normal case, vma_commit_reservation returns the same value
2112  * as the preceding vma_needs_reservation call.  The only time this
2113  * is not the case is if a reserve map was changed between calls.  It
2114  * is the responsibility of the caller to notice the difference and
2115  * take appropriate action.
2116  *
2117  * vma_add_reservation is used in error paths where a reservation must
2118  * be restored when a newly allocated huge page must be freed.  It is
2119  * to be called after calling vma_needs_reservation to determine if a
2120  * reservation exists.
2121  *
2122  * vma_del_reservation is used in error paths where an entry in the reserve
2123  * map was created during huge page allocation and must be removed.  It is to
2124  * be called after calling vma_needs_reservation to determine if a reservation
2125  * exists.
2126  */
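/*
 * Illustrative call sequence (a sketch, not a verbatim copy of any caller):
 *
 *	if (vma_needs_reservation(h, vma, addr) < 0)
 *		return ERR_PTR(-ENOMEM);
 *	page = ...allocate the huge page...;
 *	if (page)
 *		vma_commit_reservation(h, vma, addr);
 *	else
 *		vma_end_reservation(h, vma, addr);
 */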
2127 enum vma_resv_mode {
2128 	VMA_NEEDS_RESV,
2129 	VMA_COMMIT_RESV,
2130 	VMA_END_RESV,
2131 	VMA_ADD_RESV,
2132 	VMA_DEL_RESV,
2133 };
2134 static long __vma_reservation_common(struct hstate *h,
2135 				struct vm_area_struct *vma, unsigned long addr,
2136 				enum vma_resv_mode mode)
2137 {
2138 	struct resv_map *resv;
2139 	pgoff_t idx;
2140 	long ret;
2141 	long dummy_out_regions_needed;
2142 
2143 	resv = vma_resv_map(vma);
2144 	if (!resv)
2145 		return 1;
2146 
2147 	idx = vma_hugecache_offset(h, vma, addr);
2148 	switch (mode) {
2149 	case VMA_NEEDS_RESV:
2150 		ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed);
2151 		/* We assume that vma_reservation_* routines always operate on
2152 		 * 1 page, and that adding to resv map a 1 page entry can only
2153 		 * ever require 1 region.
2154 		 */
2155 		VM_BUG_ON(dummy_out_regions_needed != 1);
2156 		break;
2157 	case VMA_COMMIT_RESV:
2158 		ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2159 		/* region_add calls of range 1 should never fail. */
2160 		VM_BUG_ON(ret < 0);
2161 		break;
2162 	case VMA_END_RESV:
2163 		region_abort(resv, idx, idx + 1, 1);
2164 		ret = 0;
2165 		break;
2166 	case VMA_ADD_RESV:
2167 		if (vma->vm_flags & VM_MAYSHARE) {
2168 			ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2169 			/* region_add calls of range 1 should never fail. */
2170 			VM_BUG_ON(ret < 0);
2171 		} else {
2172 			region_abort(resv, idx, idx + 1, 1);
2173 			ret = region_del(resv, idx, idx + 1);
2174 		}
2175 		break;
2176 	case VMA_DEL_RESV:
2177 		if (vma->vm_flags & VM_MAYSHARE) {
2178 			region_abort(resv, idx, idx + 1, 1);
2179 			ret = region_del(resv, idx, idx + 1);
2180 		} else {
2181 			ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2182 			/* region_add calls of range 1 should never fail. */
2183 			VM_BUG_ON(ret < 0);
2184 		}
2185 		break;
2186 	default:
2187 		BUG();
2188 	}
2189 
2190 	if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV)
2191 		return ret;
2192 	/*
2193 	 * We know a private mapping must have HPAGE_RESV_OWNER set.
2194 	 *
2195 	 * In most cases, reserves always exist for private mappings.
2196 	 * However, the file associated with the mapping could have been
2197 	 * hole punched or truncated after the reserves were consumed;
2198 	 * a subsequent fault on such a range will not use reserves.
2199 	 * Subtle - The reserve map for private mappings has the
2200 	 * opposite meaning from that of shared mappings.  If NO
2201 	 * entry is in the reserve map, it means a reservation exists.
2202 	 * If an entry exists in the reserve map, it means the
2203 	 * reservation has already been consumed.  As a result, the
2204 	 * return value of this routine is the opposite of the
2205 	 * value returned from the reserve map manipulation routines above.
2206 	 */
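	/*
	 * Illustrative example for VMA_NEEDS_RESV on a private mapping:
	 * region_chg() returning 1 (no entry yet, so the reservation is
	 * still available) becomes 0 here, while region_chg() returning 0
	 * (an entry already exists, so the reservation was consumed)
	 * becomes 1, telling the caller a new reservation is needed.
	 */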
2207 	if (ret > 0)
2208 		return 0;
2209 	if (ret == 0)
2210 		return 1;
2211 	return ret;
2212 }
2213 
2214 static long vma_needs_reservation(struct hstate *h,
2215 			struct vm_area_struct *vma, unsigned long addr)
2216 {
2217 	return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
2218 }
2219 
2220 static long vma_commit_reservation(struct hstate *h,
2221 			struct vm_area_struct *vma, unsigned long addr)
2222 {
2223 	return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
2224 }
2225 
2226 static void vma_end_reservation(struct hstate *h,
2227 			struct vm_area_struct *vma, unsigned long addr)
2228 {
2229 	(void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
2230 }
2231 
2232 static long vma_add_reservation(struct hstate *h,
2233 			struct vm_area_struct *vma, unsigned long addr)
2234 {
2235 	return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
2236 }
2237 
2238 static long vma_del_reservation(struct hstate *h,
2239 			struct vm_area_struct *vma, unsigned long addr)
2240 {
2241 	return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV);
2242 }
2243 
2244 /*
2245  * This routine is called to restore reservation information on error paths.
2246  * It should ONLY be called for pages allocated via alloc_huge_page(), and
2247  * the hugetlb mutex should remain held when calling this routine.
2248  *
2249  * It handles two specific cases:
2250  * 1) A reservation was in place and the page consumed the reservation.
2251  *    HPageRestoreReserve is set in the page.
2252  * 2) No reservation was in place for the page, so HPageRestoreReserve is
2253  *    not set.  However, alloc_huge_page always updates the reserve map.
2254  *
2255  * In case 1, free_huge_page later in the error path will increment the
2256  * global reserve count.  But, free_huge_page does not have enough context
2257  * to adjust the reservation map.  This case deals primarily with private
2258  * mappings.  Adjust the reserve map here to be consistent with global
2259  * reserve count adjustments to be made by free_huge_page.  Make sure the
2260  * reserve map indicates there is a reservation present.
2261  *
2262  * In case 2, simply undo reserve map modifications done by alloc_huge_page.
2263  */
2264 void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
2265 			unsigned long address, struct page *page)
2266 {
2267 	long rc = vma_needs_reservation(h, vma, address);
2268 
2269 	if (HPageRestoreReserve(page)) {
2270 		if (unlikely(rc < 0))
2271 			/*
2272 			 * Rare out of memory condition in reserve map
2273 			 * manipulation.  Clear HPageRestoreReserve so that
2274 			 * global reserve count will not be incremented
2275 			 * by free_huge_page.  This will make it appear
2276 			 * as though the reservation for this page was
2277 			 * consumed.  This may prevent the task from
2278 			 * faulting in the page at a later time.  This
2279 			 * is better than inconsistent global huge page
2280 			 * accounting of reserve counts.
2281 			 */
2282 			ClearHPageRestoreReserve(page);
2283 		else if (rc)
2284 			(void)vma_add_reservation(h, vma, address);
2285 		else
2286 			vma_end_reservation(h, vma, address);
2287 	} else {
2288 		if (!rc) {
2289 			/*
2290 			 * This indicates there is an entry in the reserve map
2291 			 * added by alloc_huge_page.  We know it was added
2292 			 * before the alloc_huge_page call, otherwise
2293 			 * HPageRestoreReserve would be set on the page.
2294 			 * Remove the entry so that a subsequent allocation
2295 			 * does not consume a reservation.
2296 			 */
2297 			rc = vma_del_reservation(h, vma, address);
2298 			if (rc < 0)
2299 				/*
2300 				 * VERY rare out of memory condition.  Since
2301 				 * we can not delete the entry, set
2302 				 * HPageRestoreReserve so that the reserve
2303 				 * count will be incremented when the page
2304 				 * is freed.  This reserve will be consumed
2305 				 * on a subsequent allocation.
2306 				 */
2307 				SetHPageRestoreReserve(page);
2308 		} else if (rc < 0) {
2309 			/*
2310 			 * Rare out of memory condition from
2311 			 * vma_needs_reservation call.  Memory allocation is
2312 			 * only attempted if a new entry is needed.  Therefore,
2313 			 * this implies there is not an entry in the
2314 			 * reserve map.
2315 			 *
2316 			 * For shared mappings, no entry in the map indicates
2317 			 * no reservation.  We are done.
2318 			 */
2319 			if (!(vma->vm_flags & VM_MAYSHARE))
2320 				/*
2321 				 * For private mappings, no entry indicates
2322 				 * a reservation is present.  Since we can
2323 				 * not add an entry, set SetHPageRestoreReserve
2324 				 * on the page so reserve count will be
2325 				 * incremented when freed.  This reserve will
2326 				 * be consumed on a subsequent allocation.
2327 				 */
2328 				SetHPageRestoreReserve(page);
2329 		} else
2330 			/*
2331 			 * No reservation present, do nothing
2332 			 */
2333 			 vma_end_reservation(h, vma, address);
2334 	}
2335 }
2336 
2337 /*
2338  * alloc_and_dissolve_huge_page - Allocate a new page and dissolve the old one
2339  * @h: struct hstate old page belongs to
2340  * @old_page: Old page to dissolve
2341  * @list: List to isolate the page in case we need to
2342  * Returns 0 on success, otherwise negated error.
2343  */
2344 static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
2345 					struct list_head *list)
2346 {
2347 	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
2348 	int nid = page_to_nid(old_page);
2349 	struct page *new_page;
2350 	int ret = 0;
2351 
2352 	/*
2353 	 * Before dissolving the page, we need to allocate a new one for the
2354 	 * pool to remain stable.  Using alloc_buddy_huge_page() means we do
2355 	 * not have to deal with prep_new_huge_page() or touch any counters.
2356 	 * This simplifies things and lets us do the whole thing under the
2357 	 * lock.
2358 	 */
2359 	new_page = alloc_buddy_huge_page(h, gfp_mask, nid, NULL, NULL);
2360 	if (!new_page)
2361 		return -ENOMEM;
2362 
2363 retry:
2364 	spin_lock_irq(&hugetlb_lock);
2365 	if (!PageHuge(old_page)) {
2366 		/*
2367 		 * Freed from under us. Drop new_page too.
2368 		 */
2369 		goto free_new;
2370 	} else if (page_count(old_page)) {
2371 		/*
2372 		 * Someone has grabbed the page, try to isolate it here.
2373 		 * Fail with -EBUSY if not possible.
2374 		 */
2375 		spin_unlock_irq(&hugetlb_lock);
2376 		if (!isolate_huge_page(old_page, list))
2377 			ret = -EBUSY;
2378 		spin_lock_irq(&hugetlb_lock);
2379 		goto free_new;
2380 	} else if (!HPageFreed(old_page)) {
2381 		/*
2382 		 * The page's refcount is 0 but it has not been enqueued on the
2383 		 * freelist yet.  The race window is small, so we can succeed
2384 		 * here if we retry.
2385 		 */
2386 		spin_unlock_irq(&hugetlb_lock);
2387 		cond_resched();
2388 		goto retry;
2389 	} else {
2390 		/*
2391 		 * Ok, old_page is still a genuine free hugepage. Remove it from
2392 		 * the freelist and decrease the counters. These will be
2393 		 * incremented again when calling __prep_account_new_huge_page()
2394 		 * and enqueue_huge_page() for new_page. The counters will remain
2395 		 * stable since this happens under the lock.
2396 		 */
2397 		remove_hugetlb_page(h, old_page, false);
2398 
2399 		/*
2400 		 * new_page needs to be initialized with the standard hugetlb
2401 		 * state. This is normally done by prep_new_huge_page() but
2402 		 * that takes hugetlb_lock which is already held so we need to
2403 		 * open code it here.
2404 		 * A reference count trick is needed because the allocator gives us
2405 		 * a referenced page but the pool requires pages with a 0 refcount.
2406 		 */
2407 		__prep_new_huge_page(new_page);
2408 		__prep_account_new_huge_page(h, nid);
2409 		page_ref_dec(new_page);
2410 		enqueue_huge_page(h, new_page);
2411 
2412 		/*
2413 		 * Pages have been replaced, we can safely free the old one.
2414 		 */
2415 		spin_unlock_irq(&hugetlb_lock);
2416 		update_and_free_page(h, old_page);
2417 	}
2418 
2419 	return ret;
2420 
2421 free_new:
2422 	spin_unlock_irq(&hugetlb_lock);
2423 	__free_pages(new_page, huge_page_order(h));
2424 
2425 	return ret;
2426 }
2427 
2428 int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
2429 {
2430 	struct hstate *h;
2431 	struct page *head;
2432 	int ret = -EBUSY;
2433 
2434 	/*
2435 	 * The page might have been dissolved from under our feet, so make sure
2436 	 * to carefully check the state under the lock.
2437 	 * Return success when racing as if we dissolved the page ourselves.
2438 	 */
2439 	spin_lock_irq(&hugetlb_lock);
2440 	if (PageHuge(page)) {
2441 		head = compound_head(page);
2442 		h = page_hstate(head);
2443 	} else {
2444 		spin_unlock_irq(&hugetlb_lock);
2445 		return 0;
2446 	}
2447 	spin_unlock_irq(&hugetlb_lock);
2448 
2449 	/*
2450 	 * Fence off gigantic pages as there is a cyclic dependency between
2451 	 * alloc_contig_range and them. Return -ENOMEM as this has the effect
2452 	 * of bailing out right away without further retrying.
2453 	 */
2454 	if (hstate_is_gigantic(h))
2455 		return -ENOMEM;
2456 
2457 	if (page_count(head) && isolate_huge_page(head, list))
2458 		ret = 0;
2459 	else if (!page_count(head))
2460 		ret = alloc_and_dissolve_huge_page(h, head, list);
2461 
2462 	return ret;
2463 }
2464 
2465 struct page *alloc_huge_page(struct vm_area_struct *vma,
2466 				    unsigned long addr, int avoid_reserve)
2467 {
2468 	struct hugepage_subpool *spool = subpool_vma(vma);
2469 	struct hstate *h = hstate_vma(vma);
2470 	struct page *page;
2471 	long map_chg, map_commit;
2472 	long gbl_chg;
2473 	int ret, idx;
2474 	struct hugetlb_cgroup *h_cg;
2475 	bool deferred_reserve;
2476 
2477 	idx = hstate_index(h);
2478 	/*
2479 	 * Examine the region/reserve map to determine if the process
2480 	 * has a reservation for the page to be allocated.  A return
2481 	 * code of zero indicates a reservation exists (no change).
2482 	 */
2483 	map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
2484 	if (map_chg < 0)
2485 		return ERR_PTR(-ENOMEM);
2486 
2487 	/*
2488 	 * Processes that did not create the mapping will have no
2489 	 * reserves as indicated by the region/reserve map. Check
2490 	 * that the allocation will not exceed the subpool limit.
2491 	 * Allocations for MAP_NORESERVE mappings also need to be
2492 	 * checked against any subpool limit.
2493 	 */
2494 	if (map_chg || avoid_reserve) {
2495 		gbl_chg = hugepage_subpool_get_pages(spool, 1);
2496 		if (gbl_chg < 0) {
2497 			vma_end_reservation(h, vma, addr);
2498 			return ERR_PTR(-ENOSPC);
2499 		}
2500 
2501 		/*
2502 		 * Even though there was no reservation in the region/reserve
2503 		 * map, there could be reservations associated with the
2504 		 * subpool that can be used.  This would be indicated if the
2505 		 * return value of hugepage_subpool_get_pages() is zero.
2506 		 * However, if avoid_reserve is specified we still avoid even
2507 		 * the subpool reservations.
2508 		 */
2509 		if (avoid_reserve)
2510 			gbl_chg = 1;
2511 	}
2512 
2513 	/* If this allocation is not consuming a reservation, charge it now.
2514 	 */
2515 	deferred_reserve = map_chg || avoid_reserve;
2516 	if (deferred_reserve) {
2517 		ret = hugetlb_cgroup_charge_cgroup_rsvd(
2518 			idx, pages_per_huge_page(h), &h_cg);
2519 		if (ret)
2520 			goto out_subpool_put;
2521 	}
2522 
2523 	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
2524 	if (ret)
2525 		goto out_uncharge_cgroup_reservation;
2526 
2527 	spin_lock_irq(&hugetlb_lock);
2528 	/*
2529 	 * gbl_chg is passed to indicate whether or not a page must be taken
2530 	 * from the global free pool (global change).  gbl_chg == 0 indicates
2531 	 * a reservation exists for the allocation.
2532 	 */
2533 	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
2534 	if (!page) {
2535 		spin_unlock_irq(&hugetlb_lock);
2536 		page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
2537 		if (!page)
2538 			goto out_uncharge_cgroup;
2539 		if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
2540 			SetHPageRestoreReserve(page);
2541 			h->resv_huge_pages--;
2542 		}
2543 		spin_lock_irq(&hugetlb_lock);
2544 		list_add(&page->lru, &h->hugepage_activelist);
2545 		/* Fall through */
2546 	}
2547 	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
2548 	/* If the allocation is not consuming a reservation, also store the
2549 	 * hugetlb_cgroup pointer on the page.
2550 	 */
2551 	if (deferred_reserve) {
2552 		hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
2553 						  h_cg, page);
2554 	}
2555 
2556 	spin_unlock_irq(&hugetlb_lock);
2557 
2558 	hugetlb_set_page_subpool(page, spool);
2559 
2560 	map_commit = vma_commit_reservation(h, vma, addr);
2561 	if (unlikely(map_chg > map_commit)) {
2562 		/*
2563 		 * The page was added to the reservation map between
2564 		 * vma_needs_reservation and vma_commit_reservation.
2565 		 * This indicates a race with hugetlb_reserve_pages.
2566 		 * Adjust for the subpool count incremented above AND
2567 		 * in hugetlb_reserve_pages for the same page.  Also,
2568 		 * the reservation count added in hugetlb_reserve_pages
2569 		 * no longer applies.
2570 		 */
2571 		long rsv_adjust;
2572 
2573 		rsv_adjust = hugepage_subpool_put_pages(spool, 1);
2574 		hugetlb_acct_memory(h, -rsv_adjust);
2575 		if (deferred_reserve)
2576 			hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
2577 					pages_per_huge_page(h), page);
2578 	}
2579 	return page;
2580 
2581 out_uncharge_cgroup:
2582 	hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
2583 out_uncharge_cgroup_reservation:
2584 	if (deferred_reserve)
2585 		hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h),
2586 						    h_cg);
2587 out_subpool_put:
2588 	if (map_chg || avoid_reserve)
2589 		hugepage_subpool_put_pages(spool, 1);
2590 	vma_end_reservation(h, vma, addr);
2591 	return ERR_PTR(-ENOSPC);
2592 }
2593 
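/*
 * Note: the weak alias below lets an architecture supply its own
 * alloc_bootmem_huge_page() while everyone else falls back to the generic
 * __alloc_bootmem_huge_page().
 */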
2594 int alloc_bootmem_huge_page(struct hstate *h)
2595 	__attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
2596 int __alloc_bootmem_huge_page(struct hstate *h)
2597 {
2598 	struct huge_bootmem_page *m;
2599 	int nr_nodes, node;
2600 
2601 	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
2602 		void *addr;
2603 
2604 		addr = memblock_alloc_try_nid_raw(
2605 				huge_page_size(h), huge_page_size(h),
2606 				0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
2607 		if (addr) {
2608 			/*
2609 			 * Use the beginning of the huge page to store the
2610 			 * huge_bootmem_page struct (until gather_bootmem
2611 			 * puts them into the mem_map).
2612 			 */
2613 			m = addr;
2614 			goto found;
2615 		}
2616 	}
2617 	return 0;
2618 
2619 found:
2620 	BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
2621 	/* Put them into a private list first because mem_map is not up yet */
2622 	INIT_LIST_HEAD(&m->list);
2623 	list_add(&m->list, &huge_boot_pages);
2624 	m->hstate = h;
2625 	return 1;
2626 }
2627 
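/*
 * Illustrative: on x86_64 with 4KB base pages and the default MAX_ORDER of
 * 11, a 2MB huge page has order 9 and takes the prep_compound_page() path
 * below, while a 1GB gigantic page has order 18 and needs
 * prep_compound_gigantic_page().
 */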
2628 static void __init prep_compound_huge_page(struct page *page,
2629 		unsigned int order)
2630 {
2631 	if (unlikely(order > (MAX_ORDER - 1)))
2632 		prep_compound_gigantic_page(page, order);
2633 	else
2634 		prep_compound_page(page, order);
2635 }
2636 
2637 /* Put bootmem huge pages into the standard lists after mem_map is up */
2638 static void __init gather_bootmem_prealloc(void)
2639 {
2640 	struct huge_bootmem_page *m;
2641 
2642 	list_for_each_entry(m, &huge_boot_pages, list) {
2643 		struct page *page = virt_to_page(m);
2644 		struct hstate *h = m->hstate;
2645 
2646 		WARN_ON(page_count(page) != 1);
2647 		prep_compound_huge_page(page, huge_page_order(h));
2648 		WARN_ON(PageReserved(page));
2649 		prep_new_huge_page(h, page, page_to_nid(page));
2650 		put_page(page); /* free it into the hugepage allocator */
2651 
2652 		/*
2653 		 * If we had gigantic hugepages allocated at boot time, we need
2654 		 * to restore the 'stolen' pages to totalram_pages in order to
2655 		 * fix confusing memory reports from free(1) and other
2656 		 * side effects, like CommitLimit going negative.
2657 		 */
2658 		if (hstate_is_gigantic(h))
2659 			adjust_managed_page_count(page, pages_per_huge_page(h));
2660 		cond_resched();
2661 	}
2662 }
2663 
2664 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
2665 {
2666 	unsigned long i;
2667 	nodemask_t *node_alloc_noretry;
2668 
2669 	if (!hstate_is_gigantic(h)) {
2670 		/*
2671 		 * Bit mask controlling how hard we retry per-node allocations.
2672 		 * Ignore errors as lower level routines can deal with
2673 		 * node_alloc_noretry == NULL.  If this kmalloc fails at boot
2674 		 * time, we are likely in bigger trouble.
2675 		 */
2676 		node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry),
2677 						GFP_KERNEL);
2678 	} else {
2679 		/* allocations done at boot time */
2680 		node_alloc_noretry = NULL;
2681 	}
2682 
2683 	/* bit mask controlling how hard we retry per-node allocations */
2684 	if (node_alloc_noretry)
2685 		nodes_clear(*node_alloc_noretry);
2686 
2687 	for (i = 0; i < h->max_huge_pages; ++i) {
2688 		if (hstate_is_gigantic(h)) {
2689 			if (hugetlb_cma_size) {
2690 				pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
2691 				goto free;
2692 			}
2693 			if (!alloc_bootmem_huge_page(h))
2694 				break;
2695 		} else if (!alloc_pool_huge_page(h,
2696 					 &node_states[N_MEMORY],
2697 					 node_alloc_noretry))
2698 			break;
2699 		cond_resched();
2700 	}
2701 	if (i < h->max_huge_pages) {
2702 		char buf[32];
2703 
2704 		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2705 		pr_warn("HugeTLB: allocating %lu of page size %s failed.  Only allocated %lu hugepages.\n",
2706 			h->max_huge_pages, buf, i);
2707 		h->max_huge_pages = i;
2708 	}
2709 free:
2710 	kfree(node_alloc_noretry);
2711 }
2712 
2713 static void __init hugetlb_init_hstates(void)
2714 {
2715 	struct hstate *h;
2716 
2717 	for_each_hstate(h) {
2718 		if (minimum_order > huge_page_order(h))
2719 			minimum_order = huge_page_order(h);
2720 
2721 		/* oversized (gigantic) hugepages were initialized during early boot */
2722 		if (!hstate_is_gigantic(h))
2723 			hugetlb_hstate_alloc_pages(h);
2724 	}
2725 	VM_BUG_ON(minimum_order == UINT_MAX);
2726 }
2727 
2728 static void __init report_hugepages(void)
2729 {
2730 	struct hstate *h;
2731 
2732 	for_each_hstate(h) {
2733 		char buf[32];
2734 
2735 		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2736 		pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
2737 			buf, h->free_huge_pages);
2738 	}
2739 }
2740 
2741 #ifdef CONFIG_HIGHMEM
2742 static void try_to_free_low(struct hstate *h, unsigned long count,
2743 						nodemask_t *nodes_allowed)
2744 {
2745 	int i;
2746 	LIST_HEAD(page_list);
2747 
2748 	lockdep_assert_held(&hugetlb_lock);
2749 	if (hstate_is_gigantic(h))
2750 		return;
2751 
2752 	/*
2753 	 * Collect pages to be freed on a list, and free them after dropping the lock
2754 	 */
2755 	for_each_node_mask(i, *nodes_allowed) {
2756 		struct page *page, *next;
2757 		struct list_head *freel = &h->hugepage_freelists[i];
2758 		list_for_each_entry_safe(page, next, freel, lru) {
2759 			if (count >= h->nr_huge_pages)
2760 				goto out;
2761 			if (PageHighMem(page))
2762 				continue;
2763 			remove_hugetlb_page(h, page, false);
2764 			list_add(&page->lru, &page_list);
2765 		}
2766 	}
2767 
2768 out:
2769 	spin_unlock_irq(&hugetlb_lock);
2770 	update_and_free_pages_bulk(h, &page_list);
2771 	spin_lock_irq(&hugetlb_lock);
2772 }
2773 #else
2774 static inline void try_to_free_low(struct hstate *h, unsigned long count,
2775 						nodemask_t *nodes_allowed)
2776 {
2777 }
2778 #endif
2779 
2780 /*
2781  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
2782  * balanced by operating on them in a round-robin fashion.
2783  * Returns 1 if an adjustment was made.
2784  */
2785 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
2786 				int delta)
2787 {
2788 	int nr_nodes, node;
2789 
2790 	lockdep_assert_held(&hugetlb_lock);
2791 	VM_BUG_ON(delta != -1 && delta != 1);
2792 
2793 	if (delta < 0) {
2794 		for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2795 			if (h->surplus_huge_pages_node[node])
2796 				goto found;
2797 		}
2798 	} else {
2799 		for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2800 			if (h->surplus_huge_pages_node[node] <
2801 					h->nr_huge_pages_node[node])
2802 				goto found;
2803 		}
2804 	}
2805 	return 0;
2806 
2807 found:
2808 	h->surplus_huge_pages += delta;
2809 	h->surplus_huge_pages_node[node] += delta;
2810 	return 1;
2811 }
2812 
2813 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
2814 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
2815 			      nodemask_t *nodes_allowed)
2816 {
2817 	unsigned long min_count, ret;
2818 	struct page *page;
2819 	LIST_HEAD(page_list);
2820 	NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
2821 
2822 	/*
2823 	 * Bit mask controlling how hard we retry per-node allocations.
2824 	 * If we can not allocate the bit mask, do not attempt to allocate
2825 	 * the requested huge pages.
2826 	 */
2827 	if (node_alloc_noretry)
2828 		nodes_clear(*node_alloc_noretry);
2829 	else
2830 		return -ENOMEM;
2831 
2832 	/*
2833 	 * resize_lock mutex prevents concurrent adjustments to number of
2834 	 * pages in hstate via the proc/sysfs interfaces.
2835 	 */
2836 	mutex_lock(&h->resize_lock);
2837 	spin_lock_irq(&hugetlb_lock);
2838 
2839 	/*
2840 	 * Check for a node specific request.
2841 	 * Changing node specific huge page count may require a corresponding
2842 	 * change to the global count.  In any case, the passed node mask
2843 	 * (nodes_allowed) will restrict alloc/free to the specified node.
2844 	 */
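	/*
	 * Illustrative arithmetic: writing 4 to node 1's nr_hugepages while
	 * the hstate holds 10 pages globally, 2 of them on node 1, yields a
	 * new global target of 4 + 10 - 2 = 12 below.
	 */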
2845 	if (nid != NUMA_NO_NODE) {
2846 		unsigned long old_count = count;
2847 
2848 		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2849 		/*
2850 		 * User may have specified a large count value which caused the
2851 		 * above calculation to overflow.  In this case, they wanted
2852 		 * to allocate as many huge pages as possible.  Set count to
2853 		 * largest possible value to align with their intention.
2854 		 */
2855 		if (count < old_count)
2856 			count = ULONG_MAX;
2857 	}
2858 
2859 	/*
2860 	 * Runtime allocation of gigantic pages depends on the capability to
2861 	 * allocate large contiguous page ranges.
2862 	 * If the system does not provide this feature, return an error when
2863 	 * the user tries to allocate gigantic pages, but let the user free
2864 	 * gigantic pages that were allocated at boot time.
2865 	 */
2866 	if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
2867 		if (count > persistent_huge_pages(h)) {
2868 			spin_unlock_irq(&hugetlb_lock);
2869 			mutex_unlock(&h->resize_lock);
2870 			NODEMASK_FREE(node_alloc_noretry);
2871 			return -EINVAL;
2872 		}
2873 		/* Fall through to decrease pool */
2874 	}
2875 
2876 	/*
2877 	 * Increase the pool size
2878 	 * First take pages out of surplus state.  Then make up the
2879 	 * remaining difference by allocating fresh huge pages.
2880 	 *
2881 	 * We might race with alloc_surplus_huge_page() here and be unable
2882 	 * to convert a surplus huge page to a normal huge page. That is
2883 	 * not critical, though, it just means the overall size of the
2884 	 * pool might be one hugepage larger than it needs to be, but
2885 	 * within all the constraints specified by the sysctls.
2886 	 */
2887 	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
2888 		if (!adjust_pool_surplus(h, nodes_allowed, -1))
2889 			break;
2890 	}
2891 
2892 	while (count > persistent_huge_pages(h)) {
2893 		/*
2894 		 * If this allocation races such that we no longer need the
2895 		 * page, free_huge_page will handle it by freeing the page
2896 		 * and reducing the surplus.
2897 		 */
2898 		spin_unlock_irq(&hugetlb_lock);
2899 
2900 		/* yield cpu to avoid soft lockup */
2901 		cond_resched();
2902 
2903 		ret = alloc_pool_huge_page(h, nodes_allowed,
2904 						node_alloc_noretry);
2905 		spin_lock_irq(&hugetlb_lock);
2906 		if (!ret)
2907 			goto out;
2908 
2909 		/* Bail for signals. Probably ctrl-c from user */
2910 		if (signal_pending(current))
2911 			goto out;
2912 	}
2913 
2914 	/*
2915 	 * Decrease the pool size
2916 	 * First return free pages to the buddy allocator (being careful
2917 	 * to keep enough around to satisfy reservations).  Then place
2918 	 * pages into surplus state as needed so the pool will shrink
2919 	 * to the desired size as pages become free.
2920 	 *
2921 	 * By placing pages into the surplus state independent of the
2922 	 * overcommit value, we are allowing the surplus pool size to
2923 	 * exceed overcommit. There are few sane options here. Since
2924 	 * alloc_surplus_huge_page() is checking the global counter,
2925 	 * though, we'll note that we're not allowed to exceed surplus
2926 	 * and won't grow the pool anywhere else. Not until one of the
2927 	 * and won't grow the pool anywhere else, not until one of the
2928 	 * sysctls is changed or the surplus pages go out of use.
2929 	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
2930 	min_count = max(count, min_count);
2931 	try_to_free_low(h, min_count, nodes_allowed);
2932 
2933 	/*
2934 	 * Collect pages to be removed on a list without dropping the lock
2935 	 */
2936 	while (min_count < persistent_huge_pages(h)) {
2937 		page = remove_pool_huge_page(h, nodes_allowed, 0);
2938 		if (!page)
2939 			break;
2940 
2941 		list_add(&page->lru, &page_list);
2942 	}
2943 	/* free the pages after dropping lock */
2944 	spin_unlock_irq(&hugetlb_lock);
2945 	update_and_free_pages_bulk(h, &page_list);
2946 	spin_lock_irq(&hugetlb_lock);
2947 
2948 	while (count < persistent_huge_pages(h)) {
2949 		if (!adjust_pool_surplus(h, nodes_allowed, 1))
2950 			break;
2951 	}
2952 out:
2953 	h->max_huge_pages = persistent_huge_pages(h);
2954 	spin_unlock_irq(&hugetlb_lock);
2955 	mutex_unlock(&h->resize_lock);
2956 
2957 	NODEMASK_FREE(node_alloc_noretry);
2958 
2959 	return 0;
2960 }
2961 
2962 #define HSTATE_ATTR_RO(_name) \
2963 	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2964 
2965 #define HSTATE_ATTR(_name) \
2966 	static struct kobj_attribute _name##_attr = \
2967 		__ATTR(_name, 0644, _name##_show, _name##_store)
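/*
 * Illustrative expansion: HSTATE_ATTR(nr_hugepages) defines nr_hugepages_attr,
 * a 0644 attribute backed by nr_hugepages_show()/nr_hugepages_store() and
 * exposed e.g. as /sys/kernel/mm/hugepages/hugepages-<size>kB/nr_hugepages.
 */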
2968 
2969 static struct kobject *hugepages_kobj;
2970 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2971 
2972 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2973 
2974 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2975 {
2976 	int i;
2977 
2978 	for (i = 0; i < HUGE_MAX_HSTATE; i++)
2979 		if (hstate_kobjs[i] == kobj) {
2980 			if (nidp)
2981 				*nidp = NUMA_NO_NODE;
2982 			return &hstates[i];
2983 		}
2984 
2985 	return kobj_to_node_hstate(kobj, nidp);
2986 }
2987 
2988 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2989 					struct kobj_attribute *attr, char *buf)
2990 {
2991 	struct hstate *h;
2992 	unsigned long nr_huge_pages;
2993 	int nid;
2994 
2995 	h = kobj_to_hstate(kobj, &nid);
2996 	if (nid == NUMA_NO_NODE)
2997 		nr_huge_pages = h->nr_huge_pages;
2998 	else
2999 		nr_huge_pages = h->nr_huge_pages_node[nid];
3000 
3001 	return sysfs_emit(buf, "%lu\n", nr_huge_pages);
3002 }
3003 
3004 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
3005 					   struct hstate *h, int nid,
3006 					   unsigned long count, size_t len)
3007 {
3008 	int err;
3009 	nodemask_t nodes_allowed, *n_mask;
3010 
3011 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
3012 		return -EINVAL;
3013 
3014 	if (nid == NUMA_NO_NODE) {
3015 		/*
3016 		 * global hstate attribute
3017 		 */
3018 		if (!(obey_mempolicy &&
3019 				init_nodemask_of_mempolicy(&nodes_allowed)))
3020 			n_mask = &node_states[N_MEMORY];
3021 		else
3022 			n_mask = &nodes_allowed;
3023 	} else {
3024 		/*
3025 		 * Node specific request.  count adjustment happens in
3026 		 * set_max_huge_pages() after acquiring hugetlb_lock.
3027 		 */
3028 		init_nodemask_of_node(&nodes_allowed, nid);
3029 		n_mask = &nodes_allowed;
3030 	}
3031 
3032 	err = set_max_huge_pages(h, count, nid, n_mask);
3033 
3034 	return err ? err : len;
3035 }
3036 
3037 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
3038 					 struct kobject *kobj, const char *buf,
3039 					 size_t len)
3040 {
3041 	struct hstate *h;
3042 	unsigned long count;
3043 	int nid;
3044 	int err;
3045 
3046 	err = kstrtoul(buf, 10, &count);
3047 	if (err)
3048 		return err;
3049 
3050 	h = kobj_to_hstate(kobj, &nid);
3051 	return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
3052 }
3053 
3054 static ssize_t nr_hugepages_show(struct kobject *kobj,
3055 				       struct kobj_attribute *attr, char *buf)
3056 {
3057 	return nr_hugepages_show_common(kobj, attr, buf);
3058 }
3059 
3060 static ssize_t nr_hugepages_store(struct kobject *kobj,
3061 	       struct kobj_attribute *attr, const char *buf, size_t len)
3062 {
3063 	return nr_hugepages_store_common(false, kobj, buf, len);
3064 }
3065 HSTATE_ATTR(nr_hugepages);
3066 
3067 #ifdef CONFIG_NUMA
3068 
3069 /*
3070  * hstate attribute for optionally mempolicy-based constraint on persistent
3071  * huge page alloc/free.
3072  */
3073 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
3074 					   struct kobj_attribute *attr,
3075 					   char *buf)
3076 {
3077 	return nr_hugepages_show_common(kobj, attr, buf);
3078 }
3079 
3080 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
3081 	       struct kobj_attribute *attr, const char *buf, size_t len)
3082 {
3083 	return nr_hugepages_store_common(true, kobj, buf, len);
3084 }
3085 HSTATE_ATTR(nr_hugepages_mempolicy);
3086 #endif
3087 
3088 
3089 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
3090 					struct kobj_attribute *attr, char *buf)
3091 {
3092 	struct hstate *h = kobj_to_hstate(kobj, NULL);
3093 	return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages);
3094 }
3095 
3096 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
3097 		struct kobj_attribute *attr, const char *buf, size_t count)
3098 {
3099 	int err;
3100 	unsigned long input;
3101 	struct hstate *h = kobj_to_hstate(kobj, NULL);
3102 
3103 	if (hstate_is_gigantic(h))
3104 		return -EINVAL;
3105 
3106 	err = kstrtoul(buf, 10, &input);
3107 	if (err)
3108 		return err;
3109 
3110 	spin_lock_irq(&hugetlb_lock);
3111 	h->nr_overcommit_huge_pages = input;
3112 	spin_unlock_irq(&hugetlb_lock);
3113 
3114 	return count;
3115 }
3116 HSTATE_ATTR(nr_overcommit_hugepages);
3117 
3118 static ssize_t free_hugepages_show(struct kobject *kobj,
3119 					struct kobj_attribute *attr, char *buf)
3120 {
3121 	struct hstate *h;
3122 	unsigned long free_huge_pages;
3123 	int nid;
3124 
3125 	h = kobj_to_hstate(kobj, &nid);
3126 	if (nid == NUMA_NO_NODE)
3127 		free_huge_pages = h->free_huge_pages;
3128 	else
3129 		free_huge_pages = h->free_huge_pages_node[nid];
3130 
3131 	return sysfs_emit(buf, "%lu\n", free_huge_pages);
3132 }
3133 HSTATE_ATTR_RO(free_hugepages);
3134 
3135 static ssize_t resv_hugepages_show(struct kobject *kobj,
3136 					struct kobj_attribute *attr, char *buf)
3137 {
3138 	struct hstate *h = kobj_to_hstate(kobj, NULL);
3139 	return sysfs_emit(buf, "%lu\n", h->resv_huge_pages);
3140 }
3141 HSTATE_ATTR_RO(resv_hugepages);
3142 
3143 static ssize_t surplus_hugepages_show(struct kobject *kobj,
3144 					struct kobj_attribute *attr, char *buf)
3145 {
3146 	struct hstate *h;
3147 	unsigned long surplus_huge_pages;
3148 	int nid;
3149 
3150 	h = kobj_to_hstate(kobj, &nid);
3151 	if (nid == NUMA_NO_NODE)
3152 		surplus_huge_pages = h->surplus_huge_pages;
3153 	else
3154 		surplus_huge_pages = h->surplus_huge_pages_node[nid];
3155 
3156 	return sysfs_emit(buf, "%lu\n", surplus_huge_pages);
3157 }
3158 HSTATE_ATTR_RO(surplus_hugepages);
3159 
3160 static struct attribute *hstate_attrs[] = {
3161 	&nr_hugepages_attr.attr,
3162 	&nr_overcommit_hugepages_attr.attr,
3163 	&free_hugepages_attr.attr,
3164 	&resv_hugepages_attr.attr,
3165 	&surplus_hugepages_attr.attr,
3166 #ifdef CONFIG_NUMA
3167 	&nr_hugepages_mempolicy_attr.attr,
3168 #endif
3169 	NULL,
3170 };
3171 
3172 static const struct attribute_group hstate_attr_group = {
3173 	.attrs = hstate_attrs,
3174 };
3175 
3176 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
3177 				    struct kobject **hstate_kobjs,
3178 				    const struct attribute_group *hstate_attr_group)
3179 {
3180 	int retval;
3181 	int hi = hstate_index(h);
3182 
3183 	hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
3184 	if (!hstate_kobjs[hi])
3185 		return -ENOMEM;
3186 
3187 	retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
3188 	if (retval) {
3189 		kobject_put(hstate_kobjs[hi]);
3190 		hstate_kobjs[hi] = NULL;
3191 	}
3192 
3193 	return retval;
3194 }
3195 
3196 static void __init hugetlb_sysfs_init(void)
3197 {
3198 	struct hstate *h;
3199 	int err;
3200 
3201 	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
3202 	if (!hugepages_kobj)
3203 		return;
3204 
3205 	for_each_hstate(h) {
3206 		err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
3207 					 hstate_kobjs, &hstate_attr_group);
3208 		if (err)
3209 			pr_err("HugeTLB: Unable to add hstate %s", h->name);
3210 	}
3211 }
3212 
3213 #ifdef CONFIG_NUMA
3214 
3215 /*
3216  * node_hstate/s - associate per node hstate attributes, via their kobjects,
3217  * with node devices in node_devices[] using a parallel array.  The array
3218  * index of a node device or _hstate == node id.
3219  * This is here to avoid any static dependency of the node device driver, in
3220  * the base kernel, on the hugetlb module.
3221  */
3222 struct node_hstate {
3223 	struct kobject		*hugepages_kobj;
3224 	struct kobject		*hstate_kobjs[HUGE_MAX_HSTATE];
3225 };
3226 static struct node_hstate node_hstates[MAX_NUMNODES];
3227 
3228 /*
3229  * A subset of global hstate attributes for node devices
3230  */
3231 static struct attribute *per_node_hstate_attrs[] = {
3232 	&nr_hugepages_attr.attr,
3233 	&free_hugepages_attr.attr,
3234 	&surplus_hugepages_attr.attr,
3235 	NULL,
3236 };
3237 
3238 static const struct attribute_group per_node_hstate_attr_group = {
3239 	.attrs = per_node_hstate_attrs,
3240 };
3241 
3242 /*
3243  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
3244  * Returns node id via non-NULL nidp.
3245  */
3246 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
3247 {
3248 	int nid;
3249 
3250 	for (nid = 0; nid < nr_node_ids; nid++) {
3251 		struct node_hstate *nhs = &node_hstates[nid];
3252 		int i;
3253 		for (i = 0; i < HUGE_MAX_HSTATE; i++)
3254 			if (nhs->hstate_kobjs[i] == kobj) {
3255 				if (nidp)
3256 					*nidp = nid;
3257 				return &hstates[i];
3258 			}
3259 	}
3260 
3261 	BUG();
3262 	return NULL;
3263 }
3264 
3265 /*
3266  * Unregister hstate attributes from a single node device.
3267  * No-op if no hstate attributes attached.
3268  */
3269 static void hugetlb_unregister_node(struct node *node)
3270 {
3271 	struct hstate *h;
3272 	struct node_hstate *nhs = &node_hstates[node->dev.id];
3273 
3274 	if (!nhs->hugepages_kobj)
3275 		return;		/* no hstate attributes */
3276 
3277 	for_each_hstate(h) {
3278 		int idx = hstate_index(h);
3279 		if (nhs->hstate_kobjs[idx]) {
3280 			kobject_put(nhs->hstate_kobjs[idx]);
3281 			nhs->hstate_kobjs[idx] = NULL;
3282 		}
3283 	}
3284 
3285 	kobject_put(nhs->hugepages_kobj);
3286 	nhs->hugepages_kobj = NULL;
3287 }
3288 
3289 
3290 /*
3291  * Register hstate attributes for a single node device.
3292  * No-op if attributes already registered.
3293  */
3294 static void hugetlb_register_node(struct node *node)
3295 {
3296 	struct hstate *h;
3297 	struct node_hstate *nhs = &node_hstates[node->dev.id];
3298 	int err;
3299 
3300 	if (nhs->hugepages_kobj)
3301 		return;		/* already allocated */
3302 
3303 	nhs->hugepages_kobj = kobject_create_and_add("hugepages",
3304 							&node->dev.kobj);
3305 	if (!nhs->hugepages_kobj)
3306 		return;
3307 
3308 	for_each_hstate(h) {
3309 		err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
3310 						nhs->hstate_kobjs,
3311 						&per_node_hstate_attr_group);
3312 		if (err) {
3313 			pr_err("HugeTLB: Unable to add hstate %s for node %d\n",
3314 				h->name, node->dev.id);
3315 			hugetlb_unregister_node(node);
3316 			break;
3317 		}
3318 	}
3319 }
3320 
3321 /*
3322  * hugetlb init time:  register hstate attributes for all registered node
3323  * devices of nodes that have memory.  All on-line nodes should have
3324  * registered their associated device by this time.
3325  */
3326 static void __init hugetlb_register_all_nodes(void)
3327 {
3328 	int nid;
3329 
3330 	for_each_node_state(nid, N_MEMORY) {
3331 		struct node *node = node_devices[nid];
3332 		if (node->dev.id == nid)
3333 			hugetlb_register_node(node);
3334 	}
3335 
3336 	/*
3337 	 * Let the node device driver know we're here so it can
3338 	 * [un]register hstate attributes on node hotplug.
3339 	 */
3340 	register_hugetlbfs_with_node(hugetlb_register_node,
3341 				     hugetlb_unregister_node);
3342 }
3343 #else	/* !CONFIG_NUMA */
3344 
3345 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
3346 {
3347 	BUG();
3348 	if (nidp)
3349 		*nidp = -1;
3350 	return NULL;
3351 }
3352 
3353 static void hugetlb_register_all_nodes(void) { }
3354 
3355 #endif
3356 
3357 static int __init hugetlb_init(void)
3358 {
3359 	int i;
3360 
3361 	BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE <
3362 			__NR_HPAGEFLAGS);
3363 
3364 	if (!hugepages_supported()) {
3365 		if (hugetlb_max_hstate || default_hstate_max_huge_pages)
3366 			pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n");
3367 		return 0;
3368 	}
3369 
3370 	/*
3371 	 * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists.  Some
3372 	 * architectures depend on setup being done here.
3373 	 */
3374 	hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
3375 	if (!parsed_default_hugepagesz) {
3376 		/*
3377 		 * If we did not parse a default huge page size, set
3378 		 * default_hstate_idx to HPAGE_SIZE hstate. And, if the
3379 		 * number of huge pages for this default size was implicitly
3380 		 * specified, set that here as well.
3381 		 * Note that the implicit setting will overwrite an explicit
3382 		 * setting.  A warning will be printed in this case.
3383 		 */
3384 		default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE));
3385 		if (default_hstate_max_huge_pages) {
3386 			if (default_hstate.max_huge_pages) {
3387 				char buf[32];
3388 
3389 				string_get_size(huge_page_size(&default_hstate),
3390 					1, STRING_UNITS_2, buf, 32);
3391 				pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
3392 					default_hstate.max_huge_pages, buf);
3393 				pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n",
3394 					default_hstate_max_huge_pages);
3395 			}
3396 			default_hstate.max_huge_pages =
3397 				default_hstate_max_huge_pages;
3398 		}
3399 	}
3400 
3401 	hugetlb_cma_check();
3402 	hugetlb_init_hstates();
3403 	gather_bootmem_prealloc();
3404 	report_hugepages();
3405 
3406 	hugetlb_sysfs_init();
3407 	hugetlb_register_all_nodes();
3408 	hugetlb_cgroup_file_init();
3409 
3410 #ifdef CONFIG_SMP
3411 	num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
3412 #else
3413 	num_fault_mutexes = 1;
3414 #endif
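	/*
	 * Illustrative sizing: with 6 possible CPUs, 8 * 6 = 48 rounds up to
	 * 64 fault mutexes.
	 */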
3415 	hugetlb_fault_mutex_table =
3416 		kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
3417 			      GFP_KERNEL);
3418 	BUG_ON(!hugetlb_fault_mutex_table);
3419 
3420 	for (i = 0; i < num_fault_mutexes; i++)
3421 		mutex_init(&hugetlb_fault_mutex_table[i]);
3422 	return 0;
3423 }
3424 subsys_initcall(hugetlb_init);
3425 
3426 /* Overwritten by architectures with more huge page sizes */
3427 bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size)
3428 {
3429 	return size == HPAGE_SIZE;
3430 }
3431 
3432 void __init hugetlb_add_hstate(unsigned int order)
3433 {
3434 	struct hstate *h;
3435 	unsigned long i;
3436 
3437 	if (size_to_hstate(PAGE_SIZE << order))
3438 		return;
3440 	BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
3441 	BUG_ON(order == 0);
3442 	h = &hstates[hugetlb_max_hstate++];
3443 	mutex_init(&h->resize_lock);
3444 	h->order = order;
3445 	h->mask = ~(huge_page_size(h) - 1);
3446 	for (i = 0; i < MAX_NUMNODES; ++i)
3447 		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
3448 	INIT_LIST_HEAD(&h->hugepage_activelist);
3449 	h->next_nid_to_alloc = first_memory_node;
3450 	h->next_nid_to_free = first_memory_node;
3451 	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
3452 					huge_page_size(h)/1024);
3453 
3454 	parsed_hstate = h;
3455 }
3456 
3457 /*
3458  * hugepages command line processing
3459  * hugepages normally follows a valid hugepagesz or default_hugepagesz
3460  * specification.  If not, ignore the hugepages value.  hugepages can also
3461  * be the first huge page command line option, in which case it implicitly
3462  * specifies the number of huge pages for the default size.
3463  */
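/*
 * Illustrative example: booting with
 *	"hugepagesz=1G hugepages=2 hugepagesz=2M hugepages=512"
 * pre-allocates two 1GB pages and 512 2MB pages, while a bare "hugepages=N"
 * with no preceding hugepagesz= applies to the default huge page size.
 */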
3464 static int __init hugepages_setup(char *s)
3465 {
3466 	unsigned long *mhp;
3467 	static unsigned long *last_mhp;
3468 
3469 	if (!parsed_valid_hugepagesz) {
3470 		pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
3471 		parsed_valid_hugepagesz = true;
3472 		return 0;
3473 	}
3474 
3475 	/*
3476 	 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
3477 	 * yet, so this hugepages= parameter goes to the "default hstate".
3478 	 * Otherwise, it goes with the previously parsed hugepagesz or
3479 	 * default_hugepagesz.
3480 	 */
3481 	else if (!hugetlb_max_hstate)
3482 		mhp = &default_hstate_max_huge_pages;
3483 	else
3484 		mhp = &parsed_hstate->max_huge_pages;
3485 
3486 	if (mhp == last_mhp) {
3487 		pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s);
3488 		return 0;
3489 	}
3490 
3491 	if (sscanf(s, "%lu", mhp) <= 0)
3492 		*mhp = 0;
3493 
3494 	/*
3495 	 * Global state is always initialized later in hugetlb_init.
3496 	 * But we need to allocate gigantic hstates here early to still
3497 	 * use the bootmem allocator.
3498 	 */
3499 	if (hugetlb_max_hstate && hstate_is_gigantic(parsed_hstate))
3500 		hugetlb_hstate_alloc_pages(parsed_hstate);
3501 
3502 	last_mhp = mhp;
3503 
3504 	return 1;
3505 }
3506 __setup("hugepages=", hugepages_setup);
3507 
3508 /*
3509  * hugepagesz command line processing
3510  * A specific huge page size can only be specified once with hugepagesz.
3511  * hugepagesz is followed by hugepages on the command line.  The global
3512  * variable 'parsed_valid_hugepagesz' is used to determine if prior
3513  * hugepagesz argument was valid.
3514  */
3515 static int __init hugepagesz_setup(char *s)
3516 {
3517 	unsigned long size;
3518 	struct hstate *h;
3519 
3520 	parsed_valid_hugepagesz = false;
3521 	size = (unsigned long)memparse(s, NULL);
3522 
3523 	if (!arch_hugetlb_valid_size(size)) {
3524 		pr_err("HugeTLB: unsupported hugepagesz=%s\n", s);
3525 		return 0;
3526 	}
3527 
3528 	h = size_to_hstate(size);
3529 	if (h) {
3530 		/*
3531 		 * hstate for this size already exists.  This is normally
3532 		 * an error, but is allowed if the existing hstate is the
3533 		 * default hstate.  More specifically, it is only allowed if
3534 		 * the number of huge pages for the default hstate was not
3535 		 * previously specified.
3536 		 */
3537 		if (!parsed_default_hugepagesz || h != &default_hstate ||
3538 		    default_hstate.max_huge_pages) {
3539 			pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s);
3540 			return 0;
3541 		}
3542 
3543 		/*
3544 		 * No need to call hugetlb_add_hstate() as hstate already
3545 		 * exists.  But, do set parsed_hstate so that a following
3546 		 * hugepages= parameter will be applied to this hstate.
3547 		 */
3548 		parsed_hstate = h;
3549 		parsed_valid_hugepagesz = true;
3550 		return 1;
3551 	}
3552 
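	/*
	 * Convert the size to a page order, e.g. on an architecture with 4KB
	 * base pages a 2MB hugepagesz gives ilog2(2M) - 12 = 9.
	 */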
3553 	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
3554 	parsed_valid_hugepagesz = true;
3555 	return 1;
3556 }
3557 __setup("hugepagesz=", hugepagesz_setup);
3558 
3559 /*
3560  * default_hugepagesz command line input
3561  * Only one instance of default_hugepagesz allowed on command line.
3562  */
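/*
 * For example, "hugepages=16 default_hugepagesz=1G" (assuming the
 * architecture supports a 1GB size) makes 1GB the default huge page size
 * and, per the handling below, applies the earlier hugepages=16 to it.
 */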
3563 static int __init default_hugepagesz_setup(char *s)
3564 {
3565 	unsigned long size;
3566 
3567 	parsed_valid_hugepagesz = false;
3568 	if (parsed_default_hugepagesz) {
3569 		pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);
3570 		return 0;
3571 	}
3572 
3573 	size = (unsigned long)memparse(s, NULL);
3574 
3575 	if (!arch_hugetlb_valid_size(size)) {
3576 		pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s);
3577 		return 0;
3578 	}
3579 
3580 	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
3581 	parsed_valid_hugepagesz = true;
3582 	parsed_default_hugepagesz = true;
3583 	default_hstate_idx = hstate_index(size_to_hstate(size));
3584 
3585 	/*
3586 	 * The number of default huge pages (for this size) could have been
3587 	 * specified as the first hugetlb parameter: hugepages=X.  If so,
3588 	 * then default_hstate_max_huge_pages is set.  If the default huge
3589 	 * page size is gigantic (>= MAX_ORDER), then the pages must be
3590 	 * allocated here from the bootmem allocator.
3591 	 */
3592 	if (default_hstate_max_huge_pages) {
3593 		default_hstate.max_huge_pages = default_hstate_max_huge_pages;
3594 		if (hstate_is_gigantic(&default_hstate))
3595 			hugetlb_hstate_alloc_pages(&default_hstate);
3596 		default_hstate_max_huge_pages = 0;
3597 	}
3598 
3599 	return 1;
3600 }
3601 __setup("default_hugepagesz=", default_hugepagesz_setup);
3602 
3603 static unsigned int allowed_mems_nr(struct hstate *h)
3604 {
3605 	int node;
3606 	unsigned int nr = 0;
3607 	nodemask_t *mpol_allowed;
3608 	unsigned int *array = h->free_huge_pages_node;
3609 	gfp_t gfp_mask = htlb_alloc_mask(h);
3610 
3611 	mpol_allowed = policy_nodemask_current(gfp_mask);
3612 
3613 	for_each_node_mask(node, cpuset_current_mems_allowed) {
3614 		if (!mpol_allowed || node_isset(node, *mpol_allowed))
3615 			nr += array[node];
3616 	}
3617 
3618 	return nr;
3619 }
3620 
3621 #ifdef CONFIG_SYSCTL
3622 static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
3623 					  void *buffer, size_t *length,
3624 					  loff_t *ppos, unsigned long *out)
3625 {
3626 	struct ctl_table dup_table;
3627 
3628 	/*
3629 	 * In order to avoid races with __do_proc_doulongvec_minmax(), we
3630 	 * duplicate @table and alter the duplicate instead.
3631 	 */
3632 	dup_table = *table;
3633 	dup_table.data = out;
3634 
3635 	return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
3636 }
3637 
3638 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
3639 			 struct ctl_table *table, int write,
3640 			 void *buffer, size_t *length, loff_t *ppos)
3641 {
3642 	struct hstate *h = &default_hstate;
3643 	unsigned long tmp = h->max_huge_pages;
3644 	int ret;
3645 
3646 	if (!hugepages_supported())
3647 		return -EOPNOTSUPP;
3648 
3649 	ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
3650 					     &tmp);
3651 	if (ret)
3652 		goto out;
3653 
3654 	if (write)
3655 		ret = __nr_hugepages_store_common(obey_mempolicy, h,
3656 						  NUMA_NO_NODE, tmp, *length);
3657 out:
3658 	return ret;
3659 }
3660 
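/*
 * The handlers below are typically wired up (outside this file) to the
 * vm.nr_hugepages, vm.nr_hugepages_mempolicy and vm.nr_overcommit_hugepages
 * sysctls.
 */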
3661 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
3662 			  void *buffer, size_t *length, loff_t *ppos)
3663 {
3664 
3665 	return hugetlb_sysctl_handler_common(false, table, write,
3666 							buffer, length, ppos);
3667 }
3668 
3669 #ifdef CONFIG_NUMA
3670 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
3671 			  void *buffer, size_t *length, loff_t *ppos)
3672 {
3673 	return hugetlb_sysctl_handler_common(true, table, write,
3674 							buffer, length, ppos);
3675 }
3676 #endif /* CONFIG_NUMA */
3677 
3678 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
3679 		void *buffer, size_t *length, loff_t *ppos)
3680 {
3681 	struct hstate *h = &default_hstate;
3682 	unsigned long tmp;
3683 	int ret;
3684 
3685 	if (!hugepages_supported())
3686 		return -EOPNOTSUPP;
3687 
3688 	tmp = h->nr_overcommit_huge_pages;
3689 
3690 	if (write && hstate_is_gigantic(h))
3691 		return -EINVAL;
3692 
3693 	ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
3694 					     &tmp);
3695 	if (ret)
3696 		goto out;
3697 
3698 	if (write) {
3699 		spin_lock_irq(&hugetlb_lock);
3700 		h->nr_overcommit_huge_pages = tmp;
3701 		spin_unlock_irq(&hugetlb_lock);
3702 	}
3703 out:
3704 	return ret;
3705 }
3706 
3707 #endif /* CONFIG_SYSCTL */
3708 
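/*
 * Emits the hugetlb portion of /proc/meminfo using the format strings
 * below, e.g. (abridged, illustrative values only):
 *   HugePages_Total:       2
 *   HugePages_Free:        2
 *   Hugepagesize:       2048 kB
 *   Hugetlb:            4096 kB
 */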
3709 void hugetlb_report_meminfo(struct seq_file *m)
3710 {
3711 	struct hstate *h;
3712 	unsigned long total = 0;
3713 
3714 	if (!hugepages_supported())
3715 		return;
3716 
3717 	for_each_hstate(h) {
3718 		unsigned long count = h->nr_huge_pages;
3719 
3720 		total += huge_page_size(h) * count;
3721 
3722 		if (h == &default_hstate)
3723 			seq_printf(m,
3724 				   "HugePages_Total:   %5lu\n"
3725 				   "HugePages_Free:    %5lu\n"
3726 				   "HugePages_Rsvd:    %5lu\n"
3727 				   "HugePages_Surp:    %5lu\n"
3728 				   "Hugepagesize:   %8lu kB\n",
3729 				   count,
3730 				   h->free_huge_pages,
3731 				   h->resv_huge_pages,
3732 				   h->surplus_huge_pages,
3733 				   huge_page_size(h) / SZ_1K);
3734 	}
3735 
3736 	seq_printf(m, "Hugetlb:        %8lu kB\n", total / SZ_1K);
3737 }
3738 
3739 int hugetlb_report_node_meminfo(char *buf, int len, int nid)
3740 {
3741 	struct hstate *h = &default_hstate;
3742 
3743 	if (!hugepages_supported())
3744 		return 0;
3745 
3746 	return sysfs_emit_at(buf, len,
3747 			     "Node %d HugePages_Total: %5u\n"
3748 			     "Node %d HugePages_Free:  %5u\n"
3749 			     "Node %d HugePages_Surp:  %5u\n",
3750 			     nid, h->nr_huge_pages_node[nid],
3751 			     nid, h->free_huge_pages_node[nid],
3752 			     nid, h->surplus_huge_pages_node[nid]);
3753 }
3754 
3755 void hugetlb_show_meminfo(void)
3756 {
3757 	struct hstate *h;
3758 	int nid;
3759 
3760 	if (!hugepages_supported())
3761 		return;
3762 
3763 	for_each_node_state(nid, N_MEMORY)
3764 		for_each_hstate(h)
3765 			pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
3766 				nid,
3767 				h->nr_huge_pages_node[nid],
3768 				h->free_huge_pages_node[nid],
3769 				h->surplus_huge_pages_node[nid],
3770 				huge_page_size(h) / SZ_1K);
3771 }
3772 
3773 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
3774 {
3775 	seq_printf(m, "HugetlbPages:\t%8lu kB\n",
3776 		   atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
3777 }
3778 
3779 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
3780 unsigned long hugetlb_total_pages(void)
3781 {
3782 	struct hstate *h;
3783 	unsigned long nr_total_pages = 0;
3784 
3785 	for_each_hstate(h)
3786 		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
3787 	return nr_total_pages;
3788 }
3789 
3790 static int hugetlb_acct_memory(struct hstate *h, long delta)
3791 {
3792 	int ret = -ENOMEM;
3793 
3794 	if (!delta)
3795 		return 0;
3796 
3797 	spin_lock_irq(&hugetlb_lock);
3798 	/*
3799 	 * When cpuset is configured, it breaks the strict hugetlb page
3800 	 * reservation as the accounting is done on a global variable. Such
3801 	 * reservation is completely rubbish in the presence of cpuset because
3802 	 * the reservation is not checked against page availability for the
3803 	 * current cpuset. An application can still potentially be OOM'ed by the
3804 	 * kernel for lack of free hugetlb pages in a cpuset that the task is in.
3805 	 * Attempting to enforce strict accounting with cpuset is almost
3806 	 * impossible (or too ugly) because cpuset is so fluid that
3807 	 * tasks or memory nodes can be dynamically moved between cpusets.
3808 	 *
3809 	 * The change of semantics for shared hugetlb mapping with cpuset is
3810 	 * undesirable. However, in order to preserve some of the semantics,
3811 	 * we fall back to checking against the current free page availability
3812 	 * as a best attempt, hopefully minimizing the impact of the semantic
3813 	 * change that cpuset introduces.
3814 	 *
3815 	 * Apart from cpuset, the memory policy mechanism also determines
3816 	 * from which node the kernel will allocate memory in a NUMA
3817 	 * system. So, similar to cpuset, we should also consider the
3818 	 * memory policy of the current task, along the same lines as
3819 	 * described above.
3820 	 */
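	/*
	 * For a positive delta, first try to back the reservation with
	 * surplus pages, then verify it against the free pages in the nodes
	 * this task is allowed to use, undoing the surplus on failure.
	 */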
3821 	if (delta > 0) {
3822 		if (gather_surplus_pages(h, delta) < 0)
3823 			goto out;
3824 
3825 		if (delta > allowed_mems_nr(h)) {
3826 			return_unused_surplus_pages(h, delta);
3827 			goto out;
3828 		}
3829 	}
3830 
3831 	ret = 0;
3832 	if (delta < 0)
3833 		return_unused_surplus_pages(h, (unsigned long) -delta);
3834 
3835 out:
3836 	spin_unlock_irq(&hugetlb_lock);
3837 	return ret;
3838 }
3839 
3840 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
3841 {
3842 	struct resv_map *resv = vma_resv_map(vma);
3843 
3844 	/*
3845 	 * This new VMA should share its sibling's reservation map if present.
3846 	 * The VMA will only ever have a valid reservation map pointer where
3847 	 * it is being copied for another still existing VMA.  As that VMA
3848 	 * has a reference to the reservation map it cannot disappear until
3849 	 * after this open call completes.  It is therefore safe to take a
3850 	 * new reference here without additional locking.
3851 	 */
3852 	if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3853 		kref_get(&resv->refs);
3854 }
3855 
3856 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
3857 {
3858 	struct hstate *h = hstate_vma(vma);
3859 	struct resv_map *resv = vma_resv_map(vma);
3860 	struct hugepage_subpool *spool = subpool_vma(vma);
3861 	unsigned long reserve, start, end;
3862 	long gbl_reserve;
3863 
3864 	if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3865 		return;
3866 
3867 	start = vma_hugecache_offset(h, vma, vma->vm_start);
3868 	end = vma_hugecache_offset(h, vma, vma->vm_end);
3869 
3870 	reserve = (end - start) - region_count(resv, start, end);
3871 	hugetlb_cgroup_uncharge_counter(resv, start, end);
3872 	if (reserve) {
3873 		/*
3874 		 * Decrement reserve counts.  The global reserve count may be
3875 		 * adjusted if the subpool has a minimum size.
3876 		 */
3877 		gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
3878 		hugetlb_acct_memory(h, -gbl_reserve);
3879 	}
3880 
3881 	kref_put(&resv->refs, resv_map_release);
3882 }
3883 
3884 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
3885 {
3886 	if (addr & ~(huge_page_mask(hstate_vma(vma))))
3887 		return -EINVAL;
3888 	return 0;
3889 }
3890 
3891 static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
3892 {
3893 	return huge_page_size(hstate_vma(vma));
3894 }
3895 
3896 /*
3897  * We cannot handle pagefaults against hugetlb pages at all.  They cause
3898  * handle_mm_fault() to try to instantiate regular-sized pages in the
3899  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
3900  * this far.
3901  */
3902 static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
3903 {
3904 	BUG();
3905 	return 0;
3906 }
3907 
3908 /*
3909  * When a new function is introduced to vm_operations_struct and added
3910  * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
3911  * This is because, under the System V memory model, mappings created via
3912  * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
3913  * but their original vm_ops are overwritten with shm_vm_ops.
3914  */
3915 const struct vm_operations_struct hugetlb_vm_ops = {
3916 	.fault = hugetlb_vm_op_fault,
3917 	.open = hugetlb_vm_op_open,
3918 	.close = hugetlb_vm_op_close,
3919 	.may_split = hugetlb_vm_op_split,
3920 	.pagesize = hugetlb_vm_op_pagesize,
3921 };
3922 
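/*
 * Build the huge PTE for @page in @vma: writable mappings get a dirty,
 * writable entry, read-only ones a write-protected entry; the arch hook
 * may adjust the final encoding.
 */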
3923 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
3924 				int writable)
3925 {
3926 	pte_t entry;
3927 
3928 	if (writable) {
3929 		entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
3930 					 vma->vm_page_prot)));
3931 	} else {
3932 		entry = huge_pte_wrprotect(mk_huge_pte(page,
3933 					   vma->vm_page_prot));
3934 	}
3935 	entry = pte_mkyoung(entry);
3936 	entry = pte_mkhuge(entry);
3937 	entry = arch_make_huge_pte(entry, vma, page, writable);
3938 
3939 	return entry;
3940 }
3941 
3942 static void set_huge_ptep_writable(struct vm_area_struct *vma,
3943 				   unsigned long address, pte_t *ptep)
3944 {
3945 	pte_t entry;
3946 
3947 	entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
3948 	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
3949 		update_mmu_cache(vma, address, ptep);
3950 }
3951 
3952 bool is_hugetlb_entry_migration(pte_t pte)
3953 {
3954 	swp_entry_t swp;
3955 
3956 	if (huge_pte_none(pte) || pte_present(pte))
3957 		return false;
3958 	swp = pte_to_swp_entry(pte);
3959 	if (is_migration_entry(swp))
3960 		return true;
3961 	else
3962 		return false;
3963 }
3964 
3965 static bool is_hugetlb_entry_hwpoisoned(pte_t pte)
3966 {
3967 	swp_entry_t swp;
3968 
3969 	if (huge_pte_none(pte) || pte_present(pte))
3970 		return false;
3971 	swp = pte_to_swp_entry(pte);
3972 	if (is_hwpoison_entry(swp))
3973 		return true;
3974 	else
3975 		return false;
3976 }
3977 
3978 static void
3979 hugetlb_install_page(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
3980 		     struct page *new_page)
3981 {
3982 	__SetPageUptodate(new_page);
3983 	set_huge_pte_at(vma->vm_mm, addr, ptep, make_huge_pte(vma, new_page, 1));
3984 	hugepage_add_new_anon_rmap(new_page, vma, addr);
3985 	hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm);
3986 	ClearHPageRestoreReserve(new_page);
3987 	SetHPageMigratable(new_page);
3988 }
3989 
3990 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3991 			    struct vm_area_struct *vma)
3992 {
3993 	pte_t *src_pte, *dst_pte, entry, dst_entry;
3994 	struct page *ptepage;
3995 	unsigned long addr;
3996 	bool cow = is_cow_mapping(vma->vm_flags);
3997 	struct hstate *h = hstate_vma(vma);
3998 	unsigned long sz = huge_page_size(h);
3999 	unsigned long npages = pages_per_huge_page(h);
4000 	struct address_space *mapping = vma->vm_file->f_mapping;
4001 	struct mmu_notifier_range range;
4002 	int ret = 0;
4003 
4004 	if (cow) {
4005 		mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, src,
4006 					vma->vm_start,
4007 					vma->vm_end);
4008 		mmu_notifier_invalidate_range_start(&range);
4009 	} else {
4010 		/*
4011 		 * For shared mappings i_mmap_rwsem must be held to call
4012 		 * huge_pte_alloc, otherwise the returned ptep could go
4013 		 * away if part of a shared pmd and another thread calls
4014 		 * huge_pmd_unshare.
4015 		 */
4016 		i_mmap_lock_read(mapping);
4017 	}
4018 
4019 	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
4020 		spinlock_t *src_ptl, *dst_ptl;
4021 		src_pte = huge_pte_offset(src, addr, sz);
4022 		if (!src_pte)
4023 			continue;
4024 		dst_pte = huge_pte_alloc(dst, vma, addr, sz);
4025 		if (!dst_pte) {
4026 			ret = -ENOMEM;
4027 			break;
4028 		}
4029 
4030 		/*
4031 		 * If the pagetables are shared don't copy or take references.
4032 		 * dst_pte == src_pte is the common case of src/dest sharing.
4033 		 *
4034 		 * However, src could have 'unshared' and dst shares with
4035 		 * another vma.  If dst_pte !none, this implies sharing.
4036 		 * Check here before taking page table lock, and once again
4037 		 * after taking the lock below.
4038 		 */
4039 		dst_entry = huge_ptep_get(dst_pte);
4040 		if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
4041 			continue;
4042 
4043 		dst_ptl = huge_pte_lock(h, dst, dst_pte);
4044 		src_ptl = huge_pte_lockptr(h, src, src_pte);
4045 		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
4046 		entry = huge_ptep_get(src_pte);
4047 		dst_entry = huge_ptep_get(dst_pte);
4048 again:
4049 		if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
4050 			/*
4051 			 * Skip if src entry none.  Also, skip in the
4052 			 * unlikely case dst entry !none as this implies
4053 			 * sharing with another vma.
4054 			 */
4055 			;
4056 		} else if (unlikely(is_hugetlb_entry_migration(entry) ||
4057 				    is_hugetlb_entry_hwpoisoned(entry))) {
4058 			swp_entry_t swp_entry = pte_to_swp_entry(entry);
4059 
4060 			if (is_write_migration_entry(swp_entry) && cow) {
4061 				/*
4062 				 * COW mappings require pages in both
4063 				 * parent and child to be set to read.
4064 				 */
4065 				make_migration_entry_read(&swp_entry);
4066 				entry = swp_entry_to_pte(swp_entry);
4067 				set_huge_swap_pte_at(src, addr, src_pte,
4068 						     entry, sz);
4069 			}
4070 			set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
4071 		} else {
4072 			entry = huge_ptep_get(src_pte);
4073 			ptepage = pte_page(entry);
4074 			get_page(ptepage);
4075 
4076 			/*
4077 			 * This is a rare case where we see pinned hugetlb
4078 			 * pages while they're prone to COW.  We need to do the
4079 			 * COW earlier during fork.
4080 			 *
4081 			 * When pre-allocating the page or copying data, we
4082 			 * need to be without the pgtable locks since we could
4083 			 * sleep during the process.
4084 			 */
4085 			if (unlikely(page_needs_cow_for_dma(vma, ptepage))) {
4086 				pte_t src_pte_old = entry;
4087 				struct page *new;
4088 
4089 				spin_unlock(src_ptl);
4090 				spin_unlock(dst_ptl);
4091 				/* Do not use the reserve as it's privately owned */
4092 				new = alloc_huge_page(vma, addr, 1);
4093 				if (IS_ERR(new)) {
4094 					put_page(ptepage);
4095 					ret = PTR_ERR(new);
4096 					break;
4097 				}
4098 				copy_user_huge_page(new, ptepage, addr, vma,
4099 						    npages);
4100 				put_page(ptepage);
4101 
4102 				/* Install the new huge page if src pte stable */
4103 				dst_ptl = huge_pte_lock(h, dst, dst_pte);
4104 				src_ptl = huge_pte_lockptr(h, src, src_pte);
4105 				spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
4106 				entry = huge_ptep_get(src_pte);
4107 				if (!pte_same(src_pte_old, entry)) {
4108 					restore_reserve_on_error(h, vma, addr,
4109 								new);
4110 					put_page(new);
4111 					/* dst_entry won't change as in child */
4112 					goto again;
4113 				}
4114 				hugetlb_install_page(vma, dst_pte, addr, new);
4115 				spin_unlock(src_ptl);
4116 				spin_unlock(dst_ptl);
4117 				continue;
4118 			}
4119 
4120 			if (cow) {
4121 				/*
4122 				 * No need to notify as we are downgrading page
4123 				 * table protection not changing it to point
4124 				 * to a new page.
4125 				 *
4126 				 * See Documentation/vm/mmu_notifier.rst
4127 				 */
4128 				huge_ptep_set_wrprotect(src, addr, src_pte);
4129 				entry = huge_pte_wrprotect(entry);
4130 			}
4131 
4132 			page_dup_rmap(ptepage, true);
4133 			set_huge_pte_at(dst, addr, dst_pte, entry);
4134 			hugetlb_count_add(npages, dst);
4135 		}
4136 		spin_unlock(src_ptl);
4137 		spin_unlock(dst_ptl);
4138 	}
4139 
4140 	if (cow)
4141 		mmu_notifier_invalidate_range_end(&range);
4142 	else
4143 		i_mmap_unlock_read(mapping);
4144 
4145 	return ret;
4146 }
4147 
4148 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
4149 			    unsigned long start, unsigned long end,
4150 			    struct page *ref_page)
4151 {
4152 	struct mm_struct *mm = vma->vm_mm;
4153 	unsigned long address;
4154 	pte_t *ptep;
4155 	pte_t pte;
4156 	spinlock_t *ptl;
4157 	struct page *page;
4158 	struct hstate *h = hstate_vma(vma);
4159 	unsigned long sz = huge_page_size(h);
4160 	struct mmu_notifier_range range;
4161 
4162 	WARN_ON(!is_vm_hugetlb_page(vma));
4163 	BUG_ON(start & ~huge_page_mask(h));
4164 	BUG_ON(end & ~huge_page_mask(h));
4165 
4166 	/*
4167 	 * This is a hugetlb vma; all the pte entries should point
4168 	 * to huge pages.
4169 	 */
4170 	tlb_change_page_size(tlb, sz);
4171 	tlb_start_vma(tlb, vma);
4172 
4173 	/*
4174 	 * If sharing is possible, alert mmu notifiers of the worst case.
4175 	 */
4176 	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start,
4177 				end);
4178 	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
4179 	mmu_notifier_invalidate_range_start(&range);
4180 	address = start;
4181 	for (; address < end; address += sz) {
4182 		ptep = huge_pte_offset(mm, address, sz);
4183 		if (!ptep)
4184 			continue;
4185 
4186 		ptl = huge_pte_lock(h, mm, ptep);
4187 		if (huge_pmd_unshare(mm, vma, &address, ptep)) {
4188 			spin_unlock(ptl);
4189 			/*
4190 			 * We just unmapped a page of PMDs by clearing a PUD.
4191 			 * The caller's TLB flush range should cover this area.
4192 			 */
4193 			continue;
4194 		}
4195 
4196 		pte = huge_ptep_get(ptep);
4197 		if (huge_pte_none(pte)) {
4198 			spin_unlock(ptl);
4199 			continue;
4200 		}
4201 
4202 		/*
4203 		 * Migrating hugepage or HWPoisoned hugepage is already
4204 		 * unmapped and its refcount is dropped, so just clear pte here.
4205 		 */
4206 		if (unlikely(!pte_present(pte))) {
4207 			huge_pte_clear(mm, address, ptep, sz);
4208 			spin_unlock(ptl);
4209 			continue;
4210 		}
4211 
4212 		page = pte_page(pte);
4213 		/*
4214 		 * If a reference page is supplied, it is because a specific
4215 		 * page is being unmapped, not a range. Ensure the page we
4216 		 * are about to unmap is the actual page of interest.
4217 		 */
4218 		if (ref_page) {
4219 			if (page != ref_page) {
4220 				spin_unlock(ptl);
4221 				continue;
4222 			}
4223 			/*
4224 			 * Mark the VMA as having unmapped its page so that
4225 			 * future faults in this VMA will fail rather than
4226 			 * looking like data was lost
4227 			 */
4228 			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
4229 		}
4230 
4231 		pte = huge_ptep_get_and_clear(mm, address, ptep);
4232 		tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
4233 		if (huge_pte_dirty(pte))
4234 			set_page_dirty(page);
4235 
4236 		hugetlb_count_sub(pages_per_huge_page(h), mm);
4237 		page_remove_rmap(page, true);
4238 
4239 		spin_unlock(ptl);
4240 		tlb_remove_page_size(tlb, page, huge_page_size(h));
4241 		/*
4242 		 * Bail out after unmapping reference page if supplied
4243 		 */
4244 		if (ref_page)
4245 			break;
4246 	}
4247 	mmu_notifier_invalidate_range_end(&range);
4248 	tlb_end_vma(tlb, vma);
4249 }
4250 
4251 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
4252 			  struct vm_area_struct *vma, unsigned long start,
4253 			  unsigned long end, struct page *ref_page)
4254 {
4255 	__unmap_hugepage_range(tlb, vma, start, end, ref_page);
4256 
4257 	/*
4258 	 * Clear this flag so that x86's huge_pmd_share page_table_shareable
4259 	 * test will fail on a vma being torn down, and not grab a page table
4260 	 * on its way out.  We're lucky that the flag has such an appropriate
4261 	 * name, and can in fact be safely cleared here. We could clear it
4262 	 * before the __unmap_hugepage_range above, but all that's necessary
4263 	 * is to clear it before releasing the i_mmap_rwsem. This works
4264 	 * because in the context this is called, the VMA is about to be
4265 	 * destroyed and the i_mmap_rwsem is held.
4266 	 */
4267 	vma->vm_flags &= ~VM_MAYSHARE;
4268 }
4269 
4270 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
4271 			  unsigned long end, struct page *ref_page)
4272 {
4273 	struct mmu_gather tlb;
4274 
4275 	tlb_gather_mmu(&tlb, vma->vm_mm);
4276 	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
4277 	tlb_finish_mmu(&tlb);
4278 }
4279 
4280 /*
4281  * This is called when the original mapper is failing to COW a MAP_PRIVATE
4282  * mapping it owns the reserve page for. The intention is to unmap the page
4283  * from other VMAs and let the children be SIGKILLed if they are faulting the
4284  * same region.
4285  */
4286 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
4287 			      struct page *page, unsigned long address)
4288 {
4289 	struct hstate *h = hstate_vma(vma);
4290 	struct vm_area_struct *iter_vma;
4291 	struct address_space *mapping;
4292 	pgoff_t pgoff;
4293 
4294 	/*
4295 	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
4296 	 * from page cache lookup which is in HPAGE_SIZE units.
4297 	 */
4298 	address = address & huge_page_mask(h);
4299 	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
4300 			vma->vm_pgoff;
4301 	mapping = vma->vm_file->f_mapping;
4302 
4303 	/*
4304 	 * Take the mapping lock for the duration of the table walk. As this
4305 	 * mapping should be shared between all the VMAs, call
4306 	 * __unmap_hugepage_range() directly since the lock is already held.
4307 	 */
4308 	i_mmap_lock_write(mapping);
4309 	vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
4310 		/* Do not unmap the current VMA */
4311 		if (iter_vma == vma)
4312 			continue;
4313 
4314 		/*
4315 		 * Shared VMAs have their own reserves and do not affect
4316 		 * MAP_PRIVATE accounting but it is possible that a shared
4317 		 * VMA is using the same page so check and skip such VMAs.
4318 		 */
4319 		if (iter_vma->vm_flags & VM_MAYSHARE)
4320 			continue;
4321 
4322 		/*
4323 		 * Unmap the page from other VMAs without their own reserves.
4324 		 * They get marked to be SIGKILLed if they fault in these
4325 		 * areas. This is because a future no-page fault on this VMA
4326 		 * could insert a zeroed page instead of the data existing
4327 		 * from the time of fork. This would look like data corruption
4328 		 */
4329 		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
4330 			unmap_hugepage_range(iter_vma, address,
4331 					     address + huge_page_size(h), page);
4332 	}
4333 	i_mmap_unlock_write(mapping);
4334 }
4335 
4336 /*
4337  * Hugetlb_cow() should be called with page lock of the original hugepage held.
4338  * Called with hugetlb_instantiation_mutex held and pte_page locked so we
4339  * cannot race with other handlers or page migration.
4340  * Keep the pte_same checks anyway to make transition from the mutex easier.
4341  */
4342 static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
4343 		       unsigned long address, pte_t *ptep,
4344 		       struct page *pagecache_page, spinlock_t *ptl)
4345 {
4346 	pte_t pte;
4347 	struct hstate *h = hstate_vma(vma);
4348 	struct page *old_page, *new_page;
4349 	int outside_reserve = 0;
4350 	vm_fault_t ret = 0;
4351 	unsigned long haddr = address & huge_page_mask(h);
4352 	struct mmu_notifier_range range;
4353 
4354 	pte = huge_ptep_get(ptep);
4355 	old_page = pte_page(pte);
4356 
4357 retry_avoidcopy:
4358 	/* If no-one else is actually using this page, avoid the copy
4359 	 * and just make the page writable */
4360 	if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
4361 		page_move_anon_rmap(old_page, vma);
4362 		set_huge_ptep_writable(vma, haddr, ptep);
4363 		return 0;
4364 	}
4365 
4366 	/*
4367 	 * If the process that created a MAP_PRIVATE mapping is about to
4368 	 * perform a COW due to a shared page count, attempt to satisfy
4369 	 * the allocation without using the existing reserves. The pagecache
4370 	 * page is used to determine if the reserve at this address was
4371 	 * consumed or not. If reserves were used, a partial faulted mapping
4372 	 * at the time of fork() could consume its reserves on COW instead
4373 	 * of the full address range.
4374 	 */
4375 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
4376 			old_page != pagecache_page)
4377 		outside_reserve = 1;
4378 
4379 	get_page(old_page);
4380 
4381 	/*
4382 	 * Drop page table lock as buddy allocator may be called. It will
4383 	 * be acquired again before returning to the caller, as expected.
4384 	 */
4385 	spin_unlock(ptl);
4386 	new_page = alloc_huge_page(vma, haddr, outside_reserve);
4387 
4388 	if (IS_ERR(new_page)) {
4389 		/*
4390 		 * If a process owning a MAP_PRIVATE mapping fails to COW,
4391 		 * it is due to references held by a child and an insufficient
4392 		 * huge page pool. To guarantee the original mapper's
4393 		 * reliability, unmap the page from child processes. The child
4394 		 * may get SIGKILLed if it later faults.
4395 		 */
4396 		if (outside_reserve) {
4397 			struct address_space *mapping = vma->vm_file->f_mapping;
4398 			pgoff_t idx;
4399 			u32 hash;
4400 
4401 			put_page(old_page);
4402 			BUG_ON(huge_pte_none(pte));
4403 			/*
4404 			 * Drop hugetlb_fault_mutex and i_mmap_rwsem before
4405 			 * unmapping.  unmapping needs to hold i_mmap_rwsem
4406 			 * in write mode.  Dropping i_mmap_rwsem in read mode
4407 			 * here is OK as COW mappings do not interact with
4408 			 * PMD sharing.
4409 			 *
4410 			 * Reacquire both after unmap operation.
4411 			 */
4412 			idx = vma_hugecache_offset(h, vma, haddr);
4413 			hash = hugetlb_fault_mutex_hash(mapping, idx);
4414 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
4415 			i_mmap_unlock_read(mapping);
4416 
4417 			unmap_ref_private(mm, vma, old_page, haddr);
4418 
4419 			i_mmap_lock_read(mapping);
4420 			mutex_lock(&hugetlb_fault_mutex_table[hash]);
4421 			spin_lock(ptl);
4422 			ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
4423 			if (likely(ptep &&
4424 				   pte_same(huge_ptep_get(ptep), pte)))
4425 				goto retry_avoidcopy;
4426 			/*
4427 			 * A race occurred while re-acquiring the page
4428 			 * table lock, and our job is done.
4429 			 */
4430 			return 0;
4431 		}
4432 
4433 		ret = vmf_error(PTR_ERR(new_page));
4434 		goto out_release_old;
4435 	}
4436 
4437 	/*
4438 	 * When the original hugepage is a shared one, it does not have
4439 	 * an anon_vma prepared.
4440 	 */
4441 	if (unlikely(anon_vma_prepare(vma))) {
4442 		ret = VM_FAULT_OOM;
4443 		goto out_release_all;
4444 	}
4445 
4446 	copy_user_huge_page(new_page, old_page, address, vma,
4447 			    pages_per_huge_page(h));
4448 	__SetPageUptodate(new_page);
4449 
4450 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, haddr,
4451 				haddr + huge_page_size(h));
4452 	mmu_notifier_invalidate_range_start(&range);
4453 
4454 	/*
4455 	 * Retake the page table lock to check for racing updates
4456 	 * before the page tables are altered
4457 	 */
4458 	spin_lock(ptl);
4459 	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
4460 	if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
4461 		ClearHPageRestoreReserve(new_page);
4462 
4463 		/* Break COW */
4464 		huge_ptep_clear_flush(vma, haddr, ptep);
4465 		mmu_notifier_invalidate_range(mm, range.start, range.end);
4466 		set_huge_pte_at(mm, haddr, ptep,
4467 				make_huge_pte(vma, new_page, 1));
4468 		page_remove_rmap(old_page, true);
4469 		hugepage_add_new_anon_rmap(new_page, vma, haddr);
4470 		SetHPageMigratable(new_page);
4471 		/* Make the old page be freed below */
4472 		new_page = old_page;
4473 	}
4474 	spin_unlock(ptl);
4475 	mmu_notifier_invalidate_range_end(&range);
4476 out_release_all:
4477 	restore_reserve_on_error(h, vma, haddr, new_page);
4478 	put_page(new_page);
4479 out_release_old:
4480 	put_page(old_page);
4481 
4482 	spin_lock(ptl); /* Caller expects lock to be held */
4483 	return ret;
4484 }
4485 
4486 /* Return the pagecache page at a given address within a VMA */
4487 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
4488 			struct vm_area_struct *vma, unsigned long address)
4489 {
4490 	struct address_space *mapping;
4491 	pgoff_t idx;
4492 
4493 	mapping = vma->vm_file->f_mapping;
4494 	idx = vma_hugecache_offset(h, vma, address);
4495 
4496 	return find_lock_page(mapping, idx);
4497 }
4498 
4499 /*
4500  * Return whether there is a pagecache page to back given address within VMA.
4501  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
4502  */
4503 static bool hugetlbfs_pagecache_present(struct hstate *h,
4504 			struct vm_area_struct *vma, unsigned long address)
4505 {
4506 	struct address_space *mapping;
4507 	pgoff_t idx;
4508 	struct page *page;
4509 
4510 	mapping = vma->vm_file->f_mapping;
4511 	idx = vma_hugecache_offset(h, vma, address);
4512 
4513 	page = find_get_page(mapping, idx);
4514 	if (page)
4515 		put_page(page);
4516 	return page != NULL;
4517 }
4518 
4519 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
4520 			   pgoff_t idx)
4521 {
4522 	struct inode *inode = mapping->host;
4523 	struct hstate *h = hstate_inode(inode);
4524 	int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
4525 
4526 	if (err)
4527 		return err;
4528 	ClearHPageRestoreReserve(page);
4529 
4530 	/*
4531 	 * set page dirty so that it will not be removed from cache/file
4532 	 * by non-hugetlbfs specific code paths.
4533 	 */
4534 	set_page_dirty(page);
4535 
4536 	spin_lock(&inode->i_lock);
4537 	inode->i_blocks += blocks_per_huge_page(h);
4538 	spin_unlock(&inode->i_lock);
4539 	return 0;
4540 }
4541 
4542 static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma,
4543 						  struct address_space *mapping,
4544 						  pgoff_t idx,
4545 						  unsigned int flags,
4546 						  unsigned long haddr,
4547 						  unsigned long reason)
4548 {
4549 	vm_fault_t ret;
4550 	u32 hash;
4551 	struct vm_fault vmf = {
4552 		.vma = vma,
4553 		.address = haddr,
4554 		.flags = flags,
4555 
4556 		/*
4557 		 * Hard to debug if it ends up being
4558 		 * used by a callee that assumes
4559 		 * something about the other
4560 		 * uninitialized fields... same as in
4561 		 * memory.c
4562 		 */
4563 	};
4564 
4565 	/*
4566 	 * hugetlb_fault_mutex and i_mmap_rwsem must be
4567 	 * dropped before handling userfault.  Reacquire
4568 	 * after handling fault to make calling code simpler.
4569 	 */
4570 	hash = hugetlb_fault_mutex_hash(mapping, idx);
4571 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
4572 	i_mmap_unlock_read(mapping);
4573 	ret = handle_userfault(&vmf, reason);
4574 	i_mmap_lock_read(mapping);
4575 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
4576 
4577 	return ret;
4578 }
4579 
4580 static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
4581 			struct vm_area_struct *vma,
4582 			struct address_space *mapping, pgoff_t idx,
4583 			unsigned long address, pte_t *ptep, unsigned int flags)
4584 {
4585 	struct hstate *h = hstate_vma(vma);
4586 	vm_fault_t ret = VM_FAULT_SIGBUS;
4587 	int anon_rmap = 0;
4588 	unsigned long size;
4589 	struct page *page;
4590 	pte_t new_pte;
4591 	spinlock_t *ptl;
4592 	unsigned long haddr = address & huge_page_mask(h);
4593 	bool new_page = false;
4594 
4595 	/*
4596 	 * Currently, we are forced to kill the process in the event the
4597 	 * original mapper has unmapped pages from the child due to a failed
4598 	 * COW. Warn that such a situation has occurred as it may not be obvious
4599 	 */
4600 	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
4601 		pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
4602 			   current->pid);
4603 		return ret;
4604 	}
4605 
4606 	/*
4607 	 * We can not race with truncation due to holding i_mmap_rwsem.
4608 	 * i_size is modified when holding i_mmap_rwsem, so check here
4609 	 * once for faults beyond end of file.
4610 	 */
4611 	size = i_size_read(mapping->host) >> huge_page_shift(h);
4612 	if (idx >= size)
4613 		goto out;
4614 
4615 retry:
4616 	page = find_lock_page(mapping, idx);
4617 	if (!page) {
4618 		/* Check for page in userfault range */
4619 		if (userfaultfd_missing(vma)) {
4620 			ret = hugetlb_handle_userfault(vma, mapping, idx,
4621 						       flags, haddr,
4622 						       VM_UFFD_MISSING);
4623 			goto out;
4624 		}
4625 
4626 		page = alloc_huge_page(vma, haddr, 0);
4627 		if (IS_ERR(page)) {
4628 			/*
4629 			 * Returning error will result in faulting task being
4630 			 * sent SIGBUS.  The hugetlb fault mutex prevents two
4631 			 * tasks from racing to fault in the same page which
4632 			 * could result in false unable to allocate errors.
4633 			 * Page migration does not take the fault mutex, but
4634 			 * does a clear then write of pte's under page table
4635 			 * lock.  Page fault code could race with migration,
4636 			 * notice the clear pte and try to allocate a page
4637 			 * here.  Before returning error, get ptl and make
4638 			 * sure there really is no pte entry.
4639 			 */
4640 			ptl = huge_pte_lock(h, mm, ptep);
4641 			ret = 0;
4642 			if (huge_pte_none(huge_ptep_get(ptep)))
4643 				ret = vmf_error(PTR_ERR(page));
4644 			spin_unlock(ptl);
4645 			goto out;
4646 		}
4647 		clear_huge_page(page, address, pages_per_huge_page(h));
4648 		__SetPageUptodate(page);
4649 		new_page = true;
4650 
4651 		if (vma->vm_flags & VM_MAYSHARE) {
4652 			int err = huge_add_to_page_cache(page, mapping, idx);
4653 			if (err) {
4654 				put_page(page);
4655 				if (err == -EEXIST)
4656 					goto retry;
4657 				goto out;
4658 			}
4659 		} else {
4660 			lock_page(page);
4661 			if (unlikely(anon_vma_prepare(vma))) {
4662 				ret = VM_FAULT_OOM;
4663 				goto backout_unlocked;
4664 			}
4665 			anon_rmap = 1;
4666 		}
4667 	} else {
4668 		/*
4669 		 * If a memory error occurs between mmap() and fault, some processes
4670 		 * don't have a hwpoisoned swap entry for the errored virtual address.
4671 		 * So we need to block the hugepage fault with a PG_hwpoison bit check.
4672 		 */
4673 		if (unlikely(PageHWPoison(page))) {
4674 			ret = VM_FAULT_HWPOISON_LARGE |
4675 				VM_FAULT_SET_HINDEX(hstate_index(h));
4676 			goto backout_unlocked;
4677 		}
4678 
4679 		/* Check for page in userfault range. */
4680 		if (userfaultfd_minor(vma)) {
4681 			unlock_page(page);
4682 			put_page(page);
4683 			ret = hugetlb_handle_userfault(vma, mapping, idx,
4684 						       flags, haddr,
4685 						       VM_UFFD_MINOR);
4686 			goto out;
4687 		}
4688 	}
4689 
4690 	/*
4691 	 * If we are going to COW a private mapping later, we examine the
4692 	 * pending reservations for this page now. This will ensure that
4693 	 * any allocations necessary to record that reservation occur outside
4694 	 * the spinlock.
4695 	 */
4696 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
4697 		if (vma_needs_reservation(h, vma, haddr) < 0) {
4698 			ret = VM_FAULT_OOM;
4699 			goto backout_unlocked;
4700 		}
4701 		/* Just decrements count, does not deallocate */
4702 		vma_end_reservation(h, vma, haddr);
4703 	}
4704 
4705 	ptl = huge_pte_lock(h, mm, ptep);
4706 	ret = 0;
4707 	if (!huge_pte_none(huge_ptep_get(ptep)))
4708 		goto backout;
4709 
4710 	if (anon_rmap) {
4711 		ClearHPageRestoreReserve(page);
4712 		hugepage_add_new_anon_rmap(page, vma, haddr);
4713 	} else
4714 		page_dup_rmap(page, true);
4715 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
4716 				&& (vma->vm_flags & VM_SHARED)));
4717 	set_huge_pte_at(mm, haddr, ptep, new_pte);
4718 
4719 	hugetlb_count_add(pages_per_huge_page(h), mm);
4720 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
4721 		/* Optimization, do the COW without a second fault */
4722 		ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
4723 	}
4724 
4725 	spin_unlock(ptl);
4726 
4727 	/*
4728 	 * Only set HPageMigratable in newly allocated pages.  Existing pages
4729 	 * found in the pagecache may not have HPageMigratable set if they have
4730 	 * been isolated for migration.
4731 	 */
4732 	if (new_page)
4733 		SetHPageMigratable(page);
4734 
4735 	unlock_page(page);
4736 out:
4737 	return ret;
4738 
4739 backout:
4740 	spin_unlock(ptl);
4741 backout_unlocked:
4742 	unlock_page(page);
4743 	restore_reserve_on_error(h, vma, haddr, page);
4744 	put_page(page);
4745 	goto out;
4746 }
4747 
4748 #ifdef CONFIG_SMP
4749 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
4750 {
4751 	unsigned long key[2];
4752 	u32 hash;
4753 
4754 	key[0] = (unsigned long) mapping;
4755 	key[1] = idx;
4756 
4757 	hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
4758 
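	/*
	 * num_fault_mutexes is a power of two (see hugetlb_init), so masking
	 * the hash below always yields a valid table index.
	 */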
4759 	return hash & (num_fault_mutexes - 1);
4760 }
4761 #else
4762 /*
4763  * For uniprocessor systems we always use a single mutex, so just
4764  * return 0 and avoid the hashing overhead.
4765  */
4766 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
4767 {
4768 	return 0;
4769 }
4770 #endif
4771 
4772 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
4773 			unsigned long address, unsigned int flags)
4774 {
4775 	pte_t *ptep, entry;
4776 	spinlock_t *ptl;
4777 	vm_fault_t ret;
4778 	u32 hash;
4779 	pgoff_t idx;
4780 	struct page *page = NULL;
4781 	struct page *pagecache_page = NULL;
4782 	struct hstate *h = hstate_vma(vma);
4783 	struct address_space *mapping;
4784 	int need_wait_lock = 0;
4785 	unsigned long haddr = address & huge_page_mask(h);
4786 
4787 	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
4788 	if (ptep) {
4789 		/*
4790 		 * Since we hold no locks, ptep could be stale.  That is
4791 		 * OK as we are only making decisions based on content and
4792 		 * not actually modifying content here.
4793 		 */
4794 		entry = huge_ptep_get(ptep);
4795 		if (unlikely(is_hugetlb_entry_migration(entry))) {
4796 			migration_entry_wait_huge(vma, mm, ptep);
4797 			return 0;
4798 		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
4799 			return VM_FAULT_HWPOISON_LARGE |
4800 				VM_FAULT_SET_HINDEX(hstate_index(h));
4801 	}
4802 
4803 	/*
4804 	 * Acquire i_mmap_rwsem before calling huge_pte_alloc and hold
4805 	 * until finished with ptep.  This serves two purposes:
4806 	 * 1) It prevents huge_pmd_unshare from being called elsewhere
4807 	 *    and making the ptep no longer valid.
4808 	 * 2) It synchronizes us with i_size modifications during truncation.
4809 	 *
4810 	 * ptep could have already been assigned via huge_pte_offset.  That
4811 	 * is OK, as huge_pte_alloc will return the same value unless
4812 	 * something has changed.
4813 	 */
4814 	mapping = vma->vm_file->f_mapping;
4815 	i_mmap_lock_read(mapping);
4816 	ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h));
4817 	if (!ptep) {
4818 		i_mmap_unlock_read(mapping);
4819 		return VM_FAULT_OOM;
4820 	}
4821 
4822 	/*
4823 	 * Serialize hugepage allocation and instantiation, so that we don't
4824 	 * get spurious allocation failures if two CPUs race to instantiate
4825 	 * the same page in the page cache.
4826 	 */
4827 	idx = vma_hugecache_offset(h, vma, haddr);
4828 	hash = hugetlb_fault_mutex_hash(mapping, idx);
4829 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
4830 
4831 	entry = huge_ptep_get(ptep);
4832 	if (huge_pte_none(entry)) {
4833 		ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
4834 		goto out_mutex;
4835 	}
4836 
4837 	ret = 0;
4838 
4839 	/*
4840 	 * entry could be a migration/hwpoison entry at this point, so this
4841 	 * check prevents the code below from assuming that we have an
4842 	 * active hugepage in the pagecache. The goto expects a second page
4843 	 * fault, which the is_hugetlb_entry_(migration|hwpoisoned) checks
4844 	 * will handle properly.
4845 	 */
4846 	if (!pte_present(entry))
4847 		goto out_mutex;
4848 
4849 	/*
4850 	 * If we are going to COW the mapping later, we examine the pending
4851 	 * reservations for this page now. This will ensure that any
4852 	 * allocations necessary to record that reservation occur outside the
4853 	 * spinlock. For private mappings, we also lookup the pagecache
4854 	 * page now as it is used to determine if a reservation has been
4855 	 * consumed.
4856 	 */
4857 	if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
4858 		if (vma_needs_reservation(h, vma, haddr) < 0) {
4859 			ret = VM_FAULT_OOM;
4860 			goto out_mutex;
4861 		}
4862 		/* Just decrements count, does not deallocate */
4863 		vma_end_reservation(h, vma, haddr);
4864 
4865 		if (!(vma->vm_flags & VM_MAYSHARE))
4866 			pagecache_page = hugetlbfs_pagecache_page(h,
4867 								vma, haddr);
4868 	}
4869 
4870 	ptl = huge_pte_lock(h, mm, ptep);
4871 
4872 	/* Check for a racing update before calling hugetlb_cow */
4873 	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
4874 		goto out_ptl;
4875 
4876 	/*
4877 	 * hugetlb_cow() requires page locks of pte_page(entry) and
4878 	 * pagecache_page, so here we need to take the former one
4879 	 * when page != pagecache_page or !pagecache_page.
4880 	 */
4881 	page = pte_page(entry);
4882 	if (page != pagecache_page)
4883 		if (!trylock_page(page)) {
4884 			need_wait_lock = 1;
4885 			goto out_ptl;
4886 		}
4887 
4888 	get_page(page);
4889 
4890 	if (flags & FAULT_FLAG_WRITE) {
4891 		if (!huge_pte_write(entry)) {
4892 			ret = hugetlb_cow(mm, vma, address, ptep,
4893 					  pagecache_page, ptl);
4894 			goto out_put_page;
4895 		}
4896 		entry = huge_pte_mkdirty(entry);
4897 	}
4898 	entry = pte_mkyoung(entry);
4899 	if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
4900 						flags & FAULT_FLAG_WRITE))
4901 		update_mmu_cache(vma, haddr, ptep);
4902 out_put_page:
4903 	if (page != pagecache_page)
4904 		unlock_page(page);
4905 	put_page(page);
4906 out_ptl:
4907 	spin_unlock(ptl);
4908 
4909 	if (pagecache_page) {
4910 		unlock_page(pagecache_page);
4911 		put_page(pagecache_page);
4912 	}
4913 out_mutex:
4914 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
4915 	i_mmap_unlock_read(mapping);
4916 	/*
4917 	 * Generally it's safe to hold a refcount while waiting for the page
4918 	 * lock. But here we only wait to defer the next page fault and avoid a
4919 	 * busy loop; the page is not used after it is unlocked and before we
4920 	 * return from the current page fault. So we are safe from accessing a
4921 	 * freed page, even if we wait here without taking a refcount.
4922 	 */
4923 	if (need_wait_lock)
4924 		wait_on_page_locked(page);
4925 	return ret;
4926 }
4927 
4928 #ifdef CONFIG_USERFAULTFD
4929 /*
4930  * Used by userfaultfd UFFDIO_COPY.  Based on mcopy_atomic_pte with
4931  * modifications for huge pages.
4932  */
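/*
 * Note: with mode == MCOPY_ATOMIC_CONTINUE no new page is allocated or
 * copied; the existing page cache page at the faulting index is looked up
 * and mapped instead (see the is_continue handling below).
 */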
4933 int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
4934 			    pte_t *dst_pte,
4935 			    struct vm_area_struct *dst_vma,
4936 			    unsigned long dst_addr,
4937 			    unsigned long src_addr,
4938 			    enum mcopy_atomic_mode mode,
4939 			    struct page **pagep)
4940 {
4941 	bool is_continue = (mode == MCOPY_ATOMIC_CONTINUE);
4942 	struct address_space *mapping;
4943 	pgoff_t idx;
4944 	unsigned long size;
4945 	int vm_shared = dst_vma->vm_flags & VM_SHARED;
4946 	struct hstate *h = hstate_vma(dst_vma);
4947 	pte_t _dst_pte;
4948 	spinlock_t *ptl;
4949 	int ret;
4950 	struct page *page;
4951 	int writable;
4952 
4953 	mapping = dst_vma->vm_file->f_mapping;
4954 	idx = vma_hugecache_offset(h, dst_vma, dst_addr);
4955 
4956 	if (is_continue) {
4957 		ret = -EFAULT;
4958 		page = find_lock_page(mapping, idx);
4959 		if (!page)
4960 			goto out;
4961 	} else if (!*pagep) {
4962 		/* If a page already exists, then it's UFFDIO_COPY for
4963 		 * a non-missing case. Return -EEXIST.
4964 		 */
4965 		if (vm_shared &&
4966 		    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
4967 			ret = -EEXIST;
4968 			goto out;
4969 		}
4970 
4971 		page = alloc_huge_page(dst_vma, dst_addr, 0);
4972 		if (IS_ERR(page)) {
4973 			ret = -ENOMEM;
4974 			goto out;
4975 		}
4976 
4977 		ret = copy_huge_page_from_user(page,
4978 						(const void __user *) src_addr,
4979 						pages_per_huge_page(h), false);
4980 
4981 		/* fallback to copy_from_user outside mmap_lock */
4982 		if (unlikely(ret)) {
4983 			ret = -ENOENT;
4984 			*pagep = page;
4985 			/* don't free the page */
4986 			goto out;
4987 		}
4988 	} else {
4989 		page = *pagep;
4990 		*pagep = NULL;
4991 	}
4992 
4993 	/*
4994 	 * The memory barrier inside __SetPageUptodate makes sure that
4995 	 * preceding stores to the page contents become visible before
4996 	 * the set_pte_at() write.
4997 	 */
4998 	__SetPageUptodate(page);
4999 
5000 	/* Add shared, newly allocated pages to the page cache. */
5001 	if (vm_shared && !is_continue) {
5002 		size = i_size_read(mapping->host) >> huge_page_shift(h);
5003 		ret = -EFAULT;
5004 		if (idx >= size)
5005 			goto out_release_nounlock;
5006 
5007 		/*
5008 		 * Serialization between remove_inode_hugepages() and
5009 		 * huge_add_to_page_cache() below happens through the
5010 		 * hugetlb_fault_mutex_table that here must be held by
5011 		 * the caller.
5012 		 */
5013 		ret = huge_add_to_page_cache(page, mapping, idx);
5014 		if (ret)
5015 			goto out_release_nounlock;
5016 	}
5017 
5018 	ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
5019 	spin_lock(ptl);
5020 
5021 	/*
5022 	 * Recheck the i_size after holding PT lock to make sure not
5023 	 * to leave any page mapped (as page_mapped()) beyond the end
5024 	 * of the i_size (remove_inode_hugepages() is strict about
5025 	 * enforcing that). If we bail out here, we'll also leave a
5026 	 * page in the radix tree in the vm_shared case beyond the end
5027 	 * of the i_size, but remove_inode_hugepages() will take care
5028 	 * of it as soon as we drop the hugetlb_fault_mutex_table.
5029 	 */
5030 	size = i_size_read(mapping->host) >> huge_page_shift(h);
5031 	ret = -EFAULT;
5032 	if (idx >= size)
5033 		goto out_release_unlock;
5034 
5035 	ret = -EEXIST;
5036 	if (!huge_pte_none(huge_ptep_get(dst_pte)))
5037 		goto out_release_unlock;
5038 
5039 	if (vm_shared) {
5040 		page_dup_rmap(page, true);
5041 	} else {
5042 		ClearHPageRestoreReserve(page);
5043 		hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
5044 	}
5045 
5046 	/* For CONTINUE on a non-shared VMA, don't set VM_WRITE for CoW. */
5047 	if (is_continue && !vm_shared)
5048 		writable = 0;
5049 	else
5050 		writable = dst_vma->vm_flags & VM_WRITE;
5051 
5052 	_dst_pte = make_huge_pte(dst_vma, page, writable);
5053 	if (writable)
5054 		_dst_pte = huge_pte_mkdirty(_dst_pte);
5055 	_dst_pte = pte_mkyoung(_dst_pte);
5056 
5057 	set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
5058 
5059 	(void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte,
5060 					dst_vma->vm_flags & VM_WRITE);
5061 	hugetlb_count_add(pages_per_huge_page(h), dst_mm);
5062 
5063 	/* No need to invalidate - it was non-present before */
5064 	update_mmu_cache(dst_vma, dst_addr, dst_pte);
5065 
5066 	spin_unlock(ptl);
5067 	if (!is_continue)
5068 		SetHPageMigratable(page);
5069 	if (vm_shared || is_continue)
5070 		unlock_page(page);
5071 	ret = 0;
5072 out:
5073 	return ret;
5074 out_release_unlock:
5075 	spin_unlock(ptl);
5076 	if (vm_shared || is_continue)
5077 		unlock_page(page);
5078 out_release_nounlock:
5079 	restore_reserve_on_error(h, dst_vma, dst_addr, page);
5080 	put_page(page);
5081 	goto out;
5082 }
5083 #endif /* CONFIG_USERFAULTFD */
5084 
5085 static void record_subpages_vmas(struct page *page, struct vm_area_struct *vma,
5086 				 int refs, struct page **pages,
5087 				 struct vm_area_struct **vmas)
5088 {
5089 	int nr;
5090 
5091 	for (nr = 0; nr < refs; nr++) {
5092 		if (likely(pages))
5093 			pages[nr] = mem_map_offset(page, nr);
5094 		if (vmas)
5095 			vmas[nr] = vma;
5096 	}
5097 }
5098 
5099 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
5100 			 struct page **pages, struct vm_area_struct **vmas,
5101 			 unsigned long *position, unsigned long *nr_pages,
5102 			 long i, unsigned int flags, int *locked)
5103 {
5104 	unsigned long pfn_offset;
5105 	unsigned long vaddr = *position;
5106 	unsigned long remainder = *nr_pages;
5107 	struct hstate *h = hstate_vma(vma);
5108 	int err = -EFAULT, refs;
5109 
5110 	while (vaddr < vma->vm_end && remainder) {
5111 		pte_t *pte;
5112 		spinlock_t *ptl = NULL;
5113 		int absent;
5114 		struct page *page;
5115 
5116 		/*
5117 		 * If we have a pending SIGKILL, don't keep faulting pages and
5118 		 * potentially allocating memory.
5119 		 */
5120 		if (fatal_signal_pending(current)) {
5121 			remainder = 0;
5122 			break;
5123 		}
5124 
5125 		/*
5126 		 * Some archs (sparc64, sh*) have multiple pte_ts to
5127 		 * each hugepage.  We have to make sure we get the
5128 		 * first, for the page indexing below to work.
5129 		 *
5130 		 * Note that page table lock is not held when pte is null.
5131 		 */
5132 		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
5133 				      huge_page_size(h));
5134 		if (pte)
5135 			ptl = huge_pte_lock(h, mm, pte);
5136 		absent = !pte || huge_pte_none(huge_ptep_get(pte));
5137 
5138 		/*
5139 		 * When coredumping, it suits get_dump_page if we just return
5140 		 * an error where there's an empty slot with no huge pagecache
5141 		 * to back it.  This way, we avoid allocating a hugepage, and
5142 		 * the sparse dumpfile avoids allocating disk blocks, but its
5143 		 * huge holes still show up with zeroes where they need to be.
5144 		 */
5145 		if (absent && (flags & FOLL_DUMP) &&
5146 		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
5147 			if (pte)
5148 				spin_unlock(ptl);
5149 			remainder = 0;
5150 			break;
5151 		}
5152 
5153 		/*
5154 		 * We need to call hugetlb_fault for both hugepages under migration
5155 		 * (in which case hugetlb_fault waits for the migration) and
5156 		 * hwpoisoned hugepages (in which case we need to prevent the
5157 		 * caller from accessing them). In order to do this, we use
5158 		 * is_swap_pte here instead of is_hugetlb_entry_migration and
5159 		 * is_hugetlb_entry_hwpoisoned. This is because it simply covers
5160 		 * both cases, and because we can't follow correct pages
5161 		 * directly from any kind of swap entries.
5162 		 */
5163 		if (absent || is_swap_pte(huge_ptep_get(pte)) ||
5164 		    ((flags & FOLL_WRITE) &&
5165 		      !huge_pte_write(huge_ptep_get(pte)))) {
5166 			vm_fault_t ret;
5167 			unsigned int fault_flags = 0;
5168 
5169 			if (pte)
5170 				spin_unlock(ptl);
5171 			if (flags & FOLL_WRITE)
5172 				fault_flags |= FAULT_FLAG_WRITE;
5173 			if (locked)
5174 				fault_flags |= FAULT_FLAG_ALLOW_RETRY |
5175 					FAULT_FLAG_KILLABLE;
5176 			if (flags & FOLL_NOWAIT)
5177 				fault_flags |= FAULT_FLAG_ALLOW_RETRY |
5178 					FAULT_FLAG_RETRY_NOWAIT;
5179 			if (flags & FOLL_TRIED) {
5180 				/*
5181 				 * Note: FAULT_FLAG_ALLOW_RETRY and
5182 				 * FAULT_FLAG_TRIED can co-exist
5183 				 */
5184 				fault_flags |= FAULT_FLAG_TRIED;
5185 			}
5186 			ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
5187 			if (ret & VM_FAULT_ERROR) {
5188 				err = vm_fault_to_errno(ret, flags);
5189 				remainder = 0;
5190 				break;
5191 			}
5192 			if (ret & VM_FAULT_RETRY) {
5193 				if (locked &&
5194 				    !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
5195 					*locked = 0;
5196 				*nr_pages = 0;
5197 				/*
5198 				 * VM_FAULT_RETRY must not return an
5199 				 * error, it will return zero
5200 				 * instead.
5201 				 *
5202 				 * No need to update "position" as the
5203 				 * caller will not check it after
5204 				 * *nr_pages is set to 0.
5205 				 */
5206 				return i;
5207 			}
5208 			continue;
5209 		}
5210 
5211 		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
5212 		page = pte_page(huge_ptep_get(pte));
5213 
5214 		/*
5215 		 * If subpage information is not requested, update counters
5216 		 * and skip the subpage recording and ref-grabbing below.
5217 		 */
5218 		if (!pages && !vmas && !pfn_offset &&
5219 		    (vaddr + huge_page_size(h) < vma->vm_end) &&
5220 		    (remainder >= pages_per_huge_page(h))) {
5221 			vaddr += huge_page_size(h);
5222 			remainder -= pages_per_huge_page(h);
5223 			i += pages_per_huge_page(h);
5224 			spin_unlock(ptl);
5225 			continue;
5226 		}
5227 
5228 		refs = min3(pages_per_huge_page(h) - pfn_offset,
5229 			    (vma->vm_end - vaddr) >> PAGE_SHIFT, remainder);
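		/*
		 * Illustrative example (values assumed, not from the source):
		 * with 2MB hugepages (512 base pages), pfn_offset == 10,
		 * 1000 base pages left before vma->vm_end and remainder ==
		 * 100, refs = min3(502, 1000, 100) = 100: each iteration is
		 * capped by the hugepage, the vma and the caller's request.
		 */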
5230 
5231 		if (pages || vmas)
5232 			record_subpages_vmas(mem_map_offset(page, pfn_offset),
5233 					     vma, refs,
5234 					     likely(pages) ? pages + i : NULL,
5235 					     vmas ? vmas + i : NULL);
5236 
5237 		if (pages) {
5238 			/*
5239 			 * try_grab_compound_head() should always succeed here,
5240 			 * because: a) we hold the ptl lock, and b) we've just
5241 			 * checked that the huge page is present in the page
5242 			 * tables. If the huge page is present, then the tail
5243 			 * pages must also be present. The ptl prevents the
5244 			 * head page and tail pages from being rearranged in
5245 			 * any way. So this page must be available at this
5246 			 * point, unless the page refcount overflowed:
5247 			 */
5248 			if (WARN_ON_ONCE(!try_grab_compound_head(pages[i],
5249 								 refs,
5250 								 flags))) {
5251 				spin_unlock(ptl);
5252 				remainder = 0;
5253 				err = -ENOMEM;
5254 				break;
5255 			}
5256 		}
5257 
5258 		vaddr += (refs << PAGE_SHIFT);
5259 		remainder -= refs;
5260 		i += refs;
5261 
5262 		spin_unlock(ptl);
5263 	}
5264 	*nr_pages = remainder;
5265 	/*
5266 	 * Setting position is actually required only if remainder is
5267 	 * not zero, but it's faster not to add an "if (remainder)"
5268 	 * branch.
5269 	 */
5270 	*position = vaddr;
5271 
5272 	return i ? i : err;
5273 }
5274 
5275 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
5276 		unsigned long address, unsigned long end, pgprot_t newprot)
5277 {
5278 	struct mm_struct *mm = vma->vm_mm;
5279 	unsigned long start = address;
5280 	pte_t *ptep;
5281 	pte_t pte;
5282 	struct hstate *h = hstate_vma(vma);
5283 	unsigned long pages = 0;
5284 	bool shared_pmd = false;
5285 	struct mmu_notifier_range range;
5286 
5287 	/*
5288 	 * In the case of shared PMDs, the area to flush could be beyond
5289 	 * start/end.  Set range.start/range.end to cover the maximum possible
5290 	 * range if PMD sharing is possible.
5291 	 */
5292 	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
5293 				0, vma, mm, start, end);
5294 	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
5295 
5296 	BUG_ON(address >= end);
5297 	flush_cache_range(vma, range.start, range.end);
5298 
5299 	mmu_notifier_invalidate_range_start(&range);
5300 	i_mmap_lock_write(vma->vm_file->f_mapping);
5301 	for (; address < end; address += huge_page_size(h)) {
5302 		spinlock_t *ptl;
5303 		ptep = huge_pte_offset(mm, address, huge_page_size(h));
5304 		if (!ptep)
5305 			continue;
5306 		ptl = huge_pte_lock(h, mm, ptep);
5307 		if (huge_pmd_unshare(mm, vma, &address, ptep)) {
5308 			pages++;
5309 			spin_unlock(ptl);
5310 			shared_pmd = true;
5311 			continue;
5312 		}
5313 		pte = huge_ptep_get(ptep);
5314 		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
5315 			spin_unlock(ptl);
5316 			continue;
5317 		}
5318 		if (unlikely(is_hugetlb_entry_migration(pte))) {
5319 			swp_entry_t entry = pte_to_swp_entry(pte);
5320 
5321 			if (is_write_migration_entry(entry)) {
5322 				pte_t newpte;
5323 
5324 				make_migration_entry_read(&entry);
5325 				newpte = swp_entry_to_pte(entry);
5326 				set_huge_swap_pte_at(mm, address, ptep,
5327 						     newpte, huge_page_size(h));
5328 				pages++;
5329 			}
5330 			spin_unlock(ptl);
5331 			continue;
5332 		}
5333 		if (!huge_pte_none(pte)) {
5334 			pte_t old_pte;
5335 
5336 			old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
5337 			pte = pte_mkhuge(huge_pte_modify(old_pte, newprot));
5338 			pte = arch_make_huge_pte(pte, vma, NULL, 0);
5339 			huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
5340 			pages++;
5341 		}
5342 		spin_unlock(ptl);
5343 	}
5344 	/*
5345 	 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
5346 	 * may have cleared our pud entry and done put_page on the page table;
5347 	 * once we release i_mmap_rwsem, another task can do the final put_page
5348 	 * and that page table can then be reused and filled with junk.  If we
5349 	 * actually did unshare a page of pmds, flush the range for the pud.
5350 	 */
5351 	if (shared_pmd)
5352 		flush_hugetlb_tlb_range(vma, range.start, range.end);
5353 	else
5354 		flush_hugetlb_tlb_range(vma, start, end);
5355 	/*
5356 	 * No need to call mmu_notifier_invalidate_range(): we are downgrading
5357 	 * page table protection, not changing it to point to a new page.
5358 	 *
5359 	 * See Documentation/vm/mmu_notifier.rst
5360 	 */
5361 	i_mmap_unlock_write(vma->vm_file->f_mapping);
5362 	mmu_notifier_invalidate_range_end(&range);
5363 
5364 	return pages << h->order;
5365 }
5366 
5367 /* Return true if reservation was successful, false otherwise.  */
5368 bool hugetlb_reserve_pages(struct inode *inode,
5369 					long from, long to,
5370 					struct vm_area_struct *vma,
5371 					vm_flags_t vm_flags)
5372 {
5373 	long chg, add = -1;
5374 	struct hstate *h = hstate_inode(inode);
5375 	struct hugepage_subpool *spool = subpool_inode(inode);
5376 	struct resv_map *resv_map;
5377 	struct hugetlb_cgroup *h_cg = NULL;
5378 	long gbl_reserve, regions_needed = 0;
5379 
5380 	/* This should never happen */
5381 	if (from > to) {
5382 		VM_WARN(1, "%s called with a negative range\n", __func__);
5383 		return false;
5384 	}
5385 
5386 	/*
5387 	 * Only apply hugepage reservation if asked. At fault time, an
5388 	 * attempt will be made for VM_NORESERVE to allocate a page
5389 	 * without using reserves
5390 	 */
5391 	if (vm_flags & VM_NORESERVE)
5392 		return true;
5393 
5394 	/*
5395 	 * Shared mappings base their reservation on the number of pages that
5396 	 * are already allocated on behalf of the file. Private mappings need
5397 	 * to reserve the full area even if read-only as mprotect() may be
5398 	 * called to make the mapping read-write. Assume !vma is a shm mapping
5399 	 */
5400 	if (!vma || vma->vm_flags & VM_MAYSHARE) {
5401 		/*
5402 		 * resv_map can not be NULL as hugetlb_reserve_pages is only
5403 		 * called for inodes for which resv_maps were created (see
5404 		 * hugetlbfs_get_inode).
5405 		 */
5406 		resv_map = inode_resv_map(inode);
5407 
5408 		chg = region_chg(resv_map, from, to, &regions_needed);
5409 
5410 	} else {
5411 		/* Private mapping. */
5412 		resv_map = resv_map_alloc();
5413 		if (!resv_map)
5414 			return false;
5415 
5416 		chg = to - from;
5417 
5418 		set_vma_resv_map(vma, resv_map);
5419 		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
5420 	}
5421 
5422 	if (chg < 0)
5423 		goto out_err;
5424 
5425 	if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
5426 				chg * pages_per_huge_page(h), &h_cg) < 0)
5427 		goto out_err;
5428 
5429 	if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) {
5430 		/* For private mappings, the hugetlb_cgroup uncharge info hangs
5431 		 * off the resv_map.
5432 		 */
5433 		resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h);
5434 	}
5435 
5436 	/*
5437 	 * There must be enough pages in the subpool for the mapping. If
5438 	 * the subpool has a minimum size, there may be some global
5439 	 * reservations already in place (gbl_reserve).
5440 	 */
5441 	gbl_reserve = hugepage_subpool_get_pages(spool, chg);
5442 	if (gbl_reserve < 0)
5443 		goto out_uncharge_cgroup;
5444 
5445 	/*
5446 	 * Check that enough hugepages are available for the reservation.
5447 	 * Hand the pages back to the subpool if there are not.
5448 	 */
5449 	if (hugetlb_acct_memory(h, gbl_reserve) < 0)
5450 		goto out_put_pages;
5451 
5452 	/*
5453 	 * Account for the reservations made. Shared mappings record regions
5454 	 * that have reservations as they are shared by multiple VMAs.
5455 	 * When the last VMA disappears, the region map says how much
5456 	 * the reservation was and the page cache tells how much of
5457 	 * the reservation was consumed. Private mappings are per-VMA and
5458 	 * only the consumed reservations are tracked. When the VMA
5459 	 * disappears, the original reservation is the VMA size and the
5460 	 * consumed reservations are stored in the map. Hence, nothing
5461 	 * else has to be done for private mappings here
5462 	 */
5463 	if (!vma || vma->vm_flags & VM_MAYSHARE) {
5464 		add = region_add(resv_map, from, to, regions_needed, h, h_cg);
5465 
5466 		if (unlikely(add < 0)) {
5467 			hugetlb_acct_memory(h, -gbl_reserve);
5468 			goto out_put_pages;
5469 		} else if (unlikely(chg > add)) {
5470 			/*
5471 			 * pages in this range were added to the reserve
5472 			 * map between region_chg and region_add.  This
5473 			 * indicates a race with alloc_huge_page.  Adjust
5474 			 * the subpool and reserve counts modified above
5475 			 * based on the difference.
5476 			 */
5477 			long rsv_adjust;
5478 
5479 			/*
5480 			 * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the
5481 			 * reference to h_cg->css. See comment below for detail.
5482 			 */
5483 			hugetlb_cgroup_uncharge_cgroup_rsvd(
5484 				hstate_index(h),
5485 				(chg - add) * pages_per_huge_page(h), h_cg);
5486 
5487 			rsv_adjust = hugepage_subpool_put_pages(spool,
5488 								chg - add);
5489 			hugetlb_acct_memory(h, -rsv_adjust);
5490 		} else if (h_cg) {
5491 			/*
5492 			 * The file_regions will hold their own reference to
5493 			 * h_cg->css. So we should release the reference held
5494 			 * via hugetlb_cgroup_charge_cgroup_rsvd() when we are
5495 			 * done.
5496 			 */
5497 			hugetlb_cgroup_put_rsvd_cgroup(h_cg);
5498 		}
5499 	}
5500 	return true;
5501 
5502 out_put_pages:
5503 	/* put back original number of pages, chg */
5504 	(void)hugepage_subpool_put_pages(spool, chg);
5505 out_uncharge_cgroup:
5506 	hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
5507 					    chg * pages_per_huge_page(h), h_cg);
5508 out_err:
5509 	if (!vma || vma->vm_flags & VM_MAYSHARE)
5510 		/* Only call region_abort if the region_chg succeeded but the
5511 		 * region_add failed or didn't run.
5512 		 */
5513 		if (chg >= 0 && add < 0)
5514 			region_abort(resv_map, from, to, regions_needed);
5515 	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
5516 		kref_put(&resv_map->refs, resv_map_release);
5517 	return false;
5518 }
5519 
5520 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
5521 								long freed)
5522 {
5523 	struct hstate *h = hstate_inode(inode);
5524 	struct resv_map *resv_map = inode_resv_map(inode);
5525 	long chg = 0;
5526 	struct hugepage_subpool *spool = subpool_inode(inode);
5527 	long gbl_reserve;
5528 
5529 	/*
5530 	 * Since this routine can be called in the evict inode path for all
5531 	 * hugetlbfs inodes, resv_map could be NULL.
5532 	 */
5533 	if (resv_map) {
5534 		chg = region_del(resv_map, start, end);
5535 		/*
5536 		 * region_del() can fail in the rare case where a region
5537 		 * must be split and another region descriptor can not be
5538 		 * allocated.  If end == LONG_MAX, it will not fail.
5539 		 */
5540 		if (chg < 0)
5541 			return chg;
5542 	}
5543 
5544 	spin_lock(&inode->i_lock);
5545 	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
5546 	spin_unlock(&inode->i_lock);
5547 
5548 	/*
5549 	 * If the subpool has a minimum size, the number of global
5550 	 * reservations to be released may be adjusted.
5551 	 *
5552 	 * Note that !resv_map implies freed == 0. So (chg - freed)
5553 	 * won't go negative.
5554 	 */
5555 	gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
5556 	hugetlb_acct_memory(h, -gbl_reserve);
5557 
5558 	return 0;
5559 }
5560 
5561 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
5562 static unsigned long page_table_shareable(struct vm_area_struct *svma,
5563 				struct vm_area_struct *vma,
5564 				unsigned long addr, pgoff_t idx)
5565 {
5566 	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
5567 				svma->vm_start;
5568 	unsigned long sbase = saddr & PUD_MASK;
5569 	unsigned long s_end = sbase + PUD_SIZE;
5570 
5571 	/* Allow segments to share if only one is marked locked */
5572 	unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
5573 	unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
5574 
5575 	/*
5576 	 * Match the virtual addresses, permissions and the alignment of the
5577 	 * page table page.
5578 	 */
5579 	if (pmd_index(addr) != pmd_index(saddr) ||
5580 	    vm_flags != svm_flags ||
5581 	    !range_in_vma(svma, sbase, s_end))
5582 		return 0;
5583 
5584 	return saddr;
5585 }
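
/*
 * Worked example (addresses assumed, for illustration only): with 4KiB base
 * pages, if vma maps the file from offset 0 at 0x40000000 and svma maps the
 * same file from offset 0 at 0x80000000, then for addr == 0x40200000 idx is
 * 512 and saddr == 0x80200000, i.e. the address where svma maps the same
 * file data.  That address is shareable only if the containing PUD range
 * lies entirely within svma and the (non-VM_LOCKED) flags match.
 */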
5586 
5587 static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
5588 {
5589 	unsigned long base = addr & PUD_MASK;
5590 	unsigned long end = base + PUD_SIZE;
5591 
5592 	/*
5593 	 * check on proper vm_flags and page table alignment
5594 	 */
5595 	if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
5596 		return true;
5597 	return false;
5598 }
5599 
5600 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
5601 {
5602 #ifdef CONFIG_USERFAULTFD
5603 	if (uffd_disable_huge_pmd_share(vma))
5604 		return false;
5605 #endif
5606 	return vma_shareable(vma, addr);
5607 }
5608 
5609 /*
5610  * Determine if start,end range within vma could be mapped by shared pmd.
5611  * If yes, adjust start and end to cover range associated with possible
5612  * shared pmd mappings.
5613  */
5614 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
5615 				unsigned long *start, unsigned long *end)
5616 {
5617 	unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
5618 		v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
5619 
5620 	/*
5621 	 * vma needs to span at least one aligned PUD size, and the range
5622 	 * must be at least partially within it.
5623 	 */
5624 	if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
5625 		(*end <= v_start) || (*start >= v_end))
5626 		return;
5627 
5628 	/* Extend the range to be PUD aligned for a worst case scenario */
5629 	if (*start > v_start)
5630 		*start = ALIGN_DOWN(*start, PUD_SIZE);
5631 
5632 	if (*end < v_end)
5633 		*end = ALIGN(*end, PUD_SIZE);
5634 }
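
/*
 * Example (illustrative, assuming a 1GiB PUD_SIZE): for a VM_MAYSHARE vma
 * spanning [1GiB, 4GiB), a call with *start = 1.5GiB and *end = 2.5GiB
 * widens the range to [1GiB, 3GiB), so that any PMD page shared anywhere in
 * those PUD-sized regions is covered by the caller's flush or unmap.
 */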
5635 
5636 /*
5637  * Search for a shareable pmd page for hugetlb. In any case, it calls
5638  * pmd_alloc() and returns the corresponding pte. While this is not necessary
5639  * for the !shared pmd case, because we can allocate the pmd later as well,
5640  * it makes the code much cleaner.
5641  *
5642  * This routine must be called with i_mmap_rwsem held in at least read mode if
5643  * sharing is possible.  For hugetlbfs, this prevents removal of any page
5644  * table entries associated with the address space.  This is important as we
5645  * are setting up sharing based on existing page table entries (mappings).
5646  *
5647  * NOTE: This routine is only called from huge_pte_alloc.  Some callers of
5648  * huge_pte_alloc know that sharing is not possible and do not take
5649  * i_mmap_rwsem as a performance optimization.  This is handled by the
5650  * want_pmd_share()/vma_shareable() check made before this routine is
5651  * called. i_mmap_rwsem is only required for subsequent processing.
5652  */
5653 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
5654 		      unsigned long addr, pud_t *pud)
5655 {
5656 	struct address_space *mapping = vma->vm_file->f_mapping;
5657 	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
5658 			vma->vm_pgoff;
5659 	struct vm_area_struct *svma;
5660 	unsigned long saddr;
5661 	pte_t *spte = NULL;
5662 	pte_t *pte;
5663 	spinlock_t *ptl;
5664 
5665 	i_mmap_assert_locked(mapping);
5666 	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
5667 		if (svma == vma)
5668 			continue;
5669 
5670 		saddr = page_table_shareable(svma, vma, addr, idx);
5671 		if (saddr) {
5672 			spte = huge_pte_offset(svma->vm_mm, saddr,
5673 					       vma_mmu_pagesize(svma));
5674 			if (spte) {
5675 				get_page(virt_to_page(spte));
5676 				break;
5677 			}
5678 		}
5679 	}
5680 
5681 	if (!spte)
5682 		goto out;
5683 
5684 	ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
5685 	if (pud_none(*pud)) {
5686 		pud_populate(mm, pud,
5687 				(pmd_t *)((unsigned long)spte & PAGE_MASK));
5688 		mm_inc_nr_pmds(mm);
5689 	} else {
5690 		put_page(virt_to_page(spte));
5691 	}
5692 	spin_unlock(ptl);
5693 out:
5694 	pte = (pte_t *)pmd_alloc(mm, pud, addr);
5695 	return pte;
5696 }
5697 
5698 /*
5699  * unmap huge page backed by shared pte.
5700  *
5701  * Hugetlb pte page is ref counted at the time of mapping.  If pte is shared
5702  * The hugetlb pte page is refcounted at the time of mapping.  If the pte is
5703  * shared (page_count > 1), unmapping is achieved by clearing the pud and
5704  * decrementing the refcount. If count == 1, the pte page is not shared.
5705  * Called with page table lock held and i_mmap_rwsem held in write mode.
5706  *
5707  * returns: 1 successfully unmapped a shared pte page
5708  *	    0 the underlying pte page is not shared, or it is the last user
5709  */
5710 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
5711 					unsigned long *addr, pte_t *ptep)
5712 {
5713 	pgd_t *pgd = pgd_offset(mm, *addr);
5714 	p4d_t *p4d = p4d_offset(pgd, *addr);
5715 	pud_t *pud = pud_offset(p4d, *addr);
5716 
5717 	i_mmap_assert_write_locked(vma->vm_file->f_mapping);
5718 	BUG_ON(page_count(virt_to_page(ptep)) == 0);
5719 	if (page_count(virt_to_page(ptep)) == 1)
5720 		return 0;
5721 
5722 	pud_clear(pud);
5723 	put_page(virt_to_page(ptep));
5724 	mm_dec_nr_pmds(mm);
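	/*
	 * Rewind *addr one huge page below the next PUD boundary so that a
	 * caller iterating in huge_page_size() steps (e.g. the loop in
	 * hugetlb_change_protection() above) continues from that boundary
	 * after its own increment.
	 */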
5725 	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
5726 	return 1;
5727 }
5728 
5729 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
5730 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
5731 		      unsigned long addr, pud_t *pud)
5732 {
5733 	return NULL;
5734 }
5735 
5736 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
5737 				unsigned long *addr, pte_t *ptep)
5738 {
5739 	return 0;
5740 }
5741 
5742 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
5743 				unsigned long *start, unsigned long *end)
5744 {
5745 }
5746 
5747 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
5748 {
5749 	return false;
5750 }
5751 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
5752 
5753 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
5754 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
5755 			unsigned long addr, unsigned long sz)
5756 {
5757 	pgd_t *pgd;
5758 	p4d_t *p4d;
5759 	pud_t *pud;
5760 	pte_t *pte = NULL;
5761 
5762 	pgd = pgd_offset(mm, addr);
5763 	p4d = p4d_alloc(mm, pgd, addr);
5764 	if (!p4d)
5765 		return NULL;
5766 	pud = pud_alloc(mm, p4d, addr);
5767 	if (pud) {
5768 		if (sz == PUD_SIZE) {
5769 			pte = (pte_t *)pud;
5770 		} else {
5771 			BUG_ON(sz != PMD_SIZE);
5772 			if (want_pmd_share(vma, addr) && pud_none(*pud))
5773 				pte = huge_pmd_share(mm, vma, addr, pud);
5774 			else
5775 				pte = (pte_t *)pmd_alloc(mm, pud, addr);
5776 		}
5777 	}
5778 	BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
5779 
5780 	return pte;
5781 }
5782 
5783 /*
5784  * huge_pte_offset() - Walk the page table to resolve the hugepage
5785  * entry at address @addr
5786  *
5787  * Return: Pointer to page table entry (PUD or PMD) for
5788  * address @addr, or NULL if a !p*d_present() entry is encountered and the
5789  * size @sz doesn't match the hugepage size at this level of the page
5790  * table.
5791  */
5792 pte_t *huge_pte_offset(struct mm_struct *mm,
5793 		       unsigned long addr, unsigned long sz)
5794 {
5795 	pgd_t *pgd;
5796 	p4d_t *p4d;
5797 	pud_t *pud;
5798 	pmd_t *pmd;
5799 
5800 	pgd = pgd_offset(mm, addr);
5801 	if (!pgd_present(*pgd))
5802 		return NULL;
5803 	p4d = p4d_offset(pgd, addr);
5804 	if (!p4d_present(*p4d))
5805 		return NULL;
5806 
5807 	pud = pud_offset(p4d, addr);
5808 	if (sz == PUD_SIZE)
5809 		/* must be pud huge, non-present or none */
5810 		return (pte_t *)pud;
5811 	if (!pud_present(*pud))
5812 		return NULL;
5813 	/* must have a valid entry and size to go further */
5814 
5815 	pmd = pmd_offset(pud, addr);
5816 	/* must be pmd huge, non-present or none */
5817 	return (pte_t *)pmd;
5818 }
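
/*
 * Illustrative note: with sz == PMD_SIZE this returns the pmd slot covering
 * @addr (possibly none, non-present or a swap/migration entry); callers such
 * as follow_hugetlb_page() then examine it under huge_pte_lock().  With
 * sz == PUD_SIZE the pud slot is returned without descending further.
 */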
5819 
5820 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
5821 
5822 /*
5823  * These functions can be overridden if your architecture needs its own
5824  * behavior.
5825  */
5826 struct page * __weak
5827 follow_huge_addr(struct mm_struct *mm, unsigned long address,
5828 			      int write)
5829 {
5830 	return ERR_PTR(-EINVAL);
5831 }
5832 
5833 struct page * __weak
5834 follow_huge_pd(struct vm_area_struct *vma,
5835 	       unsigned long address, hugepd_t hpd, int flags, int pdshift)
5836 {
5837 	WARN(1, "hugepd follow called with no support for hugepage directory format\n");
5838 	return NULL;
5839 }
5840 
5841 struct page * __weak
5842 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
5843 		pmd_t *pmd, int flags)
5844 {
5845 	struct page *page = NULL;
5846 	spinlock_t *ptl;
5847 	pte_t pte;
5848 
5849 	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
5850 	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
5851 			 (FOLL_PIN | FOLL_GET)))
5852 		return NULL;
5853 
5854 retry:
5855 	ptl = pmd_lockptr(mm, pmd);
5856 	spin_lock(ptl);
5857 	/*
5858 	 * make sure that the address range covered by this pmd is not
5859 	 * unmapped from other threads.
5860 	 */
5861 	if (!pmd_huge(*pmd))
5862 		goto out;
5863 	pte = huge_ptep_get((pte_t *)pmd);
5864 	if (pte_present(pte)) {
5865 		page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
5866 		/*
5867 		 * try_grab_page() should always succeed here, because: a) we
5868 		 * hold the pmd (ptl) lock, and b) we've just checked that the
5869 		 * huge pmd (head) page is present in the page tables. The ptl
5870 		 * prevents the head page and tail pages from being rearranged
5871 		 * in any way. So this page must be available at this point,
5872 		 * unless the page refcount overflowed:
5873 		 */
5874 		if (WARN_ON_ONCE(!try_grab_page(page, flags))) {
5875 			page = NULL;
5876 			goto out;
5877 		}
5878 	} else {
5879 		if (is_hugetlb_entry_migration(pte)) {
5880 			spin_unlock(ptl);
5881 			__migration_entry_wait(mm, (pte_t *)pmd, ptl);
5882 			goto retry;
5883 		}
5884 		/*
5885 		 * hwpoisoned entry is treated as no_page_table in
5886 		 * follow_page_mask().
5887 		 */
5888 	}
5889 out:
5890 	spin_unlock(ptl);
5891 	return page;
5892 }
5893 
5894 struct page * __weak
5895 follow_huge_pud(struct mm_struct *mm, unsigned long address,
5896 		pud_t *pud, int flags)
5897 {
5898 	if (flags & (FOLL_GET | FOLL_PIN))
5899 		return NULL;
5900 
5901 	return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
5902 }
5903 
5904 struct page * __weak
5905 follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
5906 {
5907 	if (flags & (FOLL_GET | FOLL_PIN))
5908 		return NULL;
5909 
5910 	return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
5911 }
5912 
5913 bool isolate_huge_page(struct page *page, struct list_head *list)
5914 {
5915 	bool ret = true;
5916 
5917 	spin_lock_irq(&hugetlb_lock);
5918 	if (!PageHeadHuge(page) ||
5919 	    !HPageMigratable(page) ||
5920 	    !get_page_unless_zero(page)) {
5921 		ret = false;
5922 		goto unlock;
5923 	}
5924 	ClearHPageMigratable(page);
5925 	list_move_tail(&page->lru, list);
5926 unlock:
5927 	spin_unlock_irq(&hugetlb_lock);
5928 	return ret;
5929 }
5930 
5931 int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
5932 {
5933 	int ret = 0;
5934 
5935 	*hugetlb = false;
5936 	spin_lock_irq(&hugetlb_lock);
5937 	if (PageHeadHuge(page)) {
5938 		*hugetlb = true;
5939 		if (HPageFreed(page) || HPageMigratable(page))
5940 			ret = get_page_unless_zero(page);
5941 		else
5942 			ret = -EBUSY;
5943 	}
5944 	spin_unlock_irq(&hugetlb_lock);
5945 	return ret;
5946 }
5947 
5948 void putback_active_hugepage(struct page *page)
5949 {
5950 	spin_lock_irq(&hugetlb_lock);
5951 	SetHPageMigratable(page);
5952 	list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
5953 	spin_unlock_irq(&hugetlb_lock);
5954 	put_page(page);
5955 }
5956 
5957 void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
5958 {
5959 	struct hstate *h = page_hstate(oldpage);
5960 
5961 	hugetlb_cgroup_migrate(oldpage, newpage);
5962 	set_page_owner_migrate_reason(newpage, reason);
5963 
5964 	/*
5965 	 * transfer temporary state of the new huge page. This is
5966 	 * reverse to other transitions because the newpage is going to
5967 	 * be final while the old one will be freed so it takes over
5968 	 * the temporary status.
5969 	 *
5970 	 * Also note that we have to transfer the per-node surplus state
5971 	 * here as well otherwise the global surplus count will not match
5972 	 * the per-node's.
5973 	 */
5974 	if (HPageTemporary(newpage)) {
5975 		int old_nid = page_to_nid(oldpage);
5976 		int new_nid = page_to_nid(newpage);
5977 
5978 		SetHPageTemporary(oldpage);
5979 		ClearHPageTemporary(newpage);
5980 
5981 		/*
5982 		 * There is no need to transfer the per-node surplus state
5983 		 * when we do not cross the node.
5984 		 */
5985 		if (new_nid == old_nid)
5986 			return;
5987 		spin_lock_irq(&hugetlb_lock);
5988 		if (h->surplus_huge_pages_node[old_nid]) {
5989 			h->surplus_huge_pages_node[old_nid]--;
5990 			h->surplus_huge_pages_node[new_nid]++;
5991 		}
5992 		spin_unlock_irq(&hugetlb_lock);
5993 	}
5994 }
5995 
5996 /*
5997  * This function will unconditionally remove all the shared pmd pgtable entries
5998  * within the specific vma for a hugetlbfs memory range.
5999  */
6000 void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
6001 {
6002 	struct hstate *h = hstate_vma(vma);
6003 	unsigned long sz = huge_page_size(h);
6004 	struct mm_struct *mm = vma->vm_mm;
6005 	struct mmu_notifier_range range;
6006 	unsigned long address, start, end;
6007 	spinlock_t *ptl;
6008 	pte_t *ptep;
6009 
6010 	if (!(vma->vm_flags & VM_MAYSHARE))
6011 		return;
6012 
6013 	start = ALIGN(vma->vm_start, PUD_SIZE);
6014 	end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
6015 
6016 	if (start >= end)
6017 		return;
6018 
6019 	/*
6020 	 * No need to call adjust_range_if_pmd_sharing_possible(), because
6021 	 * we have already done the PUD_SIZE alignment.
6022 	 */
6023 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
6024 				start, end);
6025 	mmu_notifier_invalidate_range_start(&range);
6026 	i_mmap_lock_write(vma->vm_file->f_mapping);
6027 	for (address = start; address < end; address += PUD_SIZE) {
6028 		unsigned long tmp = address;
6029 
6030 		ptep = huge_pte_offset(mm, address, sz);
6031 		if (!ptep)
6032 			continue;
6033 		ptl = huge_pte_lock(h, mm, ptep);
6034 		/* We don't want 'address' to be changed */
6035 		huge_pmd_unshare(mm, vma, &tmp, ptep);
6036 		spin_unlock(ptl);
6037 	}
6038 	flush_hugetlb_tlb_range(vma, start, end);
6039 	i_mmap_unlock_write(vma->vm_file->f_mapping);
6040 	/*
6041 	 * No need to call mmu_notifier_invalidate_range(), see
6042 	 * Documentation/vm/mmu_notifier.rst.
6043 	 */
6044 	mmu_notifier_invalidate_range_end(&range);
6045 }
6046 
6047 #ifdef CONFIG_CMA
6048 static bool cma_reserve_called __initdata;
6049 
6050 static int __init cmdline_parse_hugetlb_cma(char *p)
6051 {
6052 	hugetlb_cma_size = memparse(p, &p);
6053 	return 0;
6054 }
6055 
6056 early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);
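
/*
 * Example usage (illustrative): booting with "hugetlb_cma=4G" requests a
 * total of 4GiB of CMA for gigantic hugetlb pages; hugetlb_cma_reserve()
 * below then spreads the reservation across the online NUMA nodes.
 */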
6057 
6058 void __init hugetlb_cma_reserve(int order)
6059 {
6060 	unsigned long size, reserved, per_node;
6061 	int nid;
6062 
6063 	cma_reserve_called = true;
6064 
6065 	if (!hugetlb_cma_size)
6066 		return;
6067 
6068 	if (hugetlb_cma_size < (PAGE_SIZE << order)) {
6069 		pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
6070 			(PAGE_SIZE << order) / SZ_1M);
6071 		return;
6072 	}
6073 
6074 	/*
6075 	 * If a 3 GB area is requested on a machine with 4 NUMA nodes,
6076 	 * allocate 1 GB on each of the first three nodes and ignore the last one.
6077 	 */
6078 	per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes);
6079 	pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
6080 		hugetlb_cma_size / SZ_1M, per_node / SZ_1M);
6081 
6082 	reserved = 0;
6083 	for_each_node_state(nid, N_ONLINE) {
6084 		int res;
6085 		char name[CMA_MAX_NAME];
6086 
6087 		size = min(per_node, hugetlb_cma_size - reserved);
6088 		size = round_up(size, PAGE_SIZE << order);
6089 
6090 		snprintf(name, sizeof(name), "hugetlb%d", nid);
6091 		res = cma_declare_contiguous_nid(0, size, 0, PAGE_SIZE << order,
6092 						 0, false, name,
6093 						 &hugetlb_cma[nid], nid);
6094 		if (res) {
6095 			pr_warn("hugetlb_cma: reservation failed: err %d, node %d\n",
6096 				res, nid);
6097 			continue;
6098 		}
6099 
6100 		reserved += size;
6101 		pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
6102 			size / SZ_1M, nid);
6103 
6104 		if (reserved >= hugetlb_cma_size)
6105 			break;
6106 	}
6107 }
6108 
6109 void __init hugetlb_cma_check(void)
6110 {
6111 	if (!hugetlb_cma_size || cma_reserve_called)
6112 		return;
6113 
6114 	pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
6115 }
6116 
6117 #endif /* CONFIG_CMA */
6118