xref: /linux/mm/hugetlb.c (revision 44331bd6a6107a33f8082521b227ffa4ec063a40)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Generic hugetlb support.
4  * (C) Nadia Yvette Chambers, April 2004
5  */
6 #include <linux/list.h>
7 #include <linux/init.h>
8 #include <linux/mm.h>
9 #include <linux/seq_file.h>
10 #include <linux/highmem.h>
11 #include <linux/mmu_notifier.h>
12 #include <linux/nodemask.h>
13 #include <linux/pagemap.h>
14 #include <linux/mempolicy.h>
15 #include <linux/compiler.h>
16 #include <linux/cpumask.h>
17 #include <linux/cpuset.h>
18 #include <linux/mutex.h>
19 #include <linux/memblock.h>
20 #include <linux/minmax.h>
21 #include <linux/slab.h>
22 #include <linux/sched/mm.h>
23 #include <linux/mmdebug.h>
24 #include <linux/sched/signal.h>
25 #include <linux/rmap.h>
26 #include <linux/string_choices.h>
27 #include <linux/string_helpers.h>
28 #include <linux/swap.h>
29 #include <linux/leafops.h>
30 #include <linux/jhash.h>
31 #include <linux/numa.h>
32 #include <linux/llist.h>
33 #include <linux/cma.h>
34 #include <linux/migrate.h>
35 #include <linux/nospec.h>
36 #include <linux/delayacct.h>
37 #include <linux/memory.h>
38 #include <linux/mm_inline.h>
39 #include <linux/padata.h>
40 #include <linux/pgalloc.h>
41 
42 #include <asm/page.h>
43 #include <asm/tlb.h>
44 #include <asm/setup.h>
45 
46 #include <linux/io.h>
47 #include <linux/node.h>
48 #include <linux/page_owner.h>
49 #include "internal.h"
50 #include "hugetlb_vmemmap.h"
51 #include "hugetlb_cma.h"
52 #include "hugetlb_internal.h"
53 #include <linux/page-isolation.h>
54 
55 int hugetlb_max_hstate __read_mostly;
56 unsigned int default_hstate_idx;
57 struct hstate hstates[HUGE_MAX_HSTATE];
58 
59 __initdata nodemask_t hugetlb_bootmem_nodes;
60 __initdata struct list_head huge_boot_pages[MAX_NUMNODES];
61 static unsigned long hstate_boot_nrinvalid[HUGE_MAX_HSTATE] __initdata;
62 
63 /*
64  * Due to ordering constraints across the init code for various
65  * architectures, hugetlb hstate cmdline parameters can't simply
66  * be early_param. early_param might call the setup function
67  * before valid hugetlb page sizes are determined, leading to
68  * incorrect rejection of valid hugepagesz= options.
69  *
70  * So, record the parameters early and consume them whenever the
71  * init code is ready for them, by calling hugetlb_parse_params().
72  */
73 
74 /* one (hugepagesz=,hugepages=) pair per hstate, one default_hugepagesz */
75 #define HUGE_MAX_CMDLINE_ARGS	(2 * HUGE_MAX_HSTATE + 1)
76 struct hugetlb_cmdline {
77 	char *val;
78 	int (*setup)(char *val);
79 };
80 
81 /* for command line parsing */
82 static struct hstate * __initdata parsed_hstate;
83 static unsigned long __initdata default_hstate_max_huge_pages;
84 static bool __initdata parsed_valid_hugepagesz = true;
85 static bool __initdata parsed_default_hugepagesz;
86 static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;
87 static unsigned long hugepage_allocation_threads __initdata;
88 
89 static char hstate_cmdline_buf[COMMAND_LINE_SIZE] __initdata;
90 static int hstate_cmdline_index __initdata;
91 static struct hugetlb_cmdline hugetlb_params[HUGE_MAX_CMDLINE_ARGS] __initdata;
92 static int hugetlb_param_index __initdata;
93 static __init int hugetlb_add_param(char *s, int (*setup)(char *val));
94 static __init void hugetlb_parse_params(void);
95 
96 #define hugetlb_early_param(str, func) \
97 static __init int func##args(char *s) \
98 { \
99 	return hugetlb_add_param(s, func); \
100 } \
101 early_param(str, func##args)
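
/*
 * Illustrative sketch only (the real registrations appear further down in
 * this file); the names here are hypothetical.  A handler of type
 * int (*)(char *) is registered as
 *
 *	static int __init example_setup(char *s)
 *	{
 *		return 0;
 *	}
 *	hugetlb_early_param("example_param", example_setup);
 *
 * which expands to an early_param("example_param", example_setupargs)
 * wrapper whose body merely calls hugetlb_add_param(s, example_setup),
 * deferring the actual parsing to hugetlb_parse_params().
 */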
102 
103 /*
104  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
105  * free_huge_pages, and surplus_huge_pages.
106  */
107 __cacheline_aligned_in_smp DEFINE_SPINLOCK(hugetlb_lock);
108 
109 /*
110  * Serializes faults on the same logical page.  This is used to
111  * prevent spurious OOMs when the hugepage pool is fully utilized.
112  */
113 static int num_fault_mutexes __ro_after_init;
114 struct mutex *hugetlb_fault_mutex_table __ro_after_init;
115 
116 /* Forward declaration */
117 static int hugetlb_acct_memory(struct hstate *h, long delta);
118 static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
119 static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
120 static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
121 		unsigned long start, unsigned long end, bool take_locks);
122 static struct resv_map *vma_resv_map(struct vm_area_struct *vma);
123 
124 static inline bool subpool_is_free(struct hugepage_subpool *spool)
125 {
126 	if (spool->count)
127 		return false;
128 	if (spool->max_hpages != -1)
129 		return spool->used_hpages == 0;
130 	if (spool->min_hpages != -1)
131 		return spool->rsv_hpages == spool->min_hpages;
132 
133 	return true;
134 }
135 
136 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
137 						unsigned long irq_flags)
138 {
139 	spin_unlock_irqrestore(&spool->lock, irq_flags);
140 
141 	/* If no pages are used, and no other handles to the subpool
142 	 * remain, give up any reservations based on minimum size and
143 	 * free the subpool */
144 	if (subpool_is_free(spool)) {
145 		if (spool->min_hpages != -1)
146 			hugetlb_acct_memory(spool->hstate,
147 						-spool->min_hpages);
148 		kfree(spool);
149 	}
150 }
151 
152 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
153 						long min_hpages)
154 {
155 	struct hugepage_subpool *spool;
156 
157 	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
158 	if (!spool)
159 		return NULL;
160 
161 	spin_lock_init(&spool->lock);
162 	spool->count = 1;
163 	spool->max_hpages = max_hpages;
164 	spool->hstate = h;
165 	spool->min_hpages = min_hpages;
166 
167 	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
168 		kfree(spool);
169 		return NULL;
170 	}
171 	spool->rsv_hpages = min_hpages;
172 
173 	return spool;
174 }
175 
176 void hugepage_put_subpool(struct hugepage_subpool *spool)
177 {
178 	unsigned long flags;
179 
180 	spin_lock_irqsave(&spool->lock, flags);
181 	BUG_ON(!spool->count);
182 	spool->count--;
183 	unlock_or_release_subpool(spool, flags);
184 }
185 
186 /*
187  * Subpool accounting for allocating and reserving pages.
188  * Return -ENOMEM if there are not enough resources to satisfy the
189  * request.  Otherwise, return the number of pages by which the
190  * global pools must be adjusted (upward).  The returned value may
191  * only be different than the passed value (delta) in the case where
192  * a subpool minimum size must be maintained.
193  */
194 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
195 				      long delta)
196 {
197 	long ret = delta;
198 
199 	if (!spool)
200 		return ret;
201 
202 	spin_lock_irq(&spool->lock);
203 
204 	if (spool->max_hpages != -1) {		/* maximum size accounting */
205 		if ((spool->used_hpages + delta) <= spool->max_hpages)
206 			spool->used_hpages += delta;
207 		else {
208 			ret = -ENOMEM;
209 			goto unlock_ret;
210 		}
211 	}
212 
213 	/* minimum size accounting */
214 	if (spool->min_hpages != -1 && spool->rsv_hpages) {
215 		if (delta > spool->rsv_hpages) {
216 			/*
217 			 * Asking for more reserves than those already taken on
218 			 * behalf of subpool.  Return difference.
219 			 */
220 			ret = delta - spool->rsv_hpages;
221 			spool->rsv_hpages = 0;
222 		} else {
223 			ret = 0;	/* reserves already accounted for */
224 			spool->rsv_hpages -= delta;
225 		}
226 	}
227 
228 unlock_ret:
229 	spin_unlock_irq(&spool->lock);
230 	return ret;
231 }
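
/*
 * Worked example (illustrative numbers only): with max_hpages == -1,
 * min_hpages == 4 and rsv_hpages == 4, a call
 *
 *	hugepage_subpool_get_pages(spool, 6);
 *
 * consumes all 4 subpool reserves and returns 2, i.e. the global pools
 * must still be charged for 2 pages.  A following call asking for 3 more
 * pages finds rsv_hpages == 0 and returns 3 unchanged.
 */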
232 
233 /*
234  * Subpool accounting for freeing and unreserving pages.
235  * Return the number of global page reservations that must be dropped.
236  * The return value may only be different than the passed value (delta)
237  * in the case where a subpool minimum size must be maintained.
238  */
239 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
240 				       long delta)
241 {
242 	long ret = delta;
243 	unsigned long flags;
244 
245 	if (!spool)
246 		return delta;
247 
248 	spin_lock_irqsave(&spool->lock, flags);
249 
250 	if (spool->max_hpages != -1)		/* maximum size accounting */
251 		spool->used_hpages -= delta;
252 
253 	 /* minimum size accounting */
254 	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
255 		if (spool->rsv_hpages + delta <= spool->min_hpages)
256 			ret = 0;
257 		else
258 			ret = spool->rsv_hpages + delta - spool->min_hpages;
259 
260 		spool->rsv_hpages += delta;
261 		if (spool->rsv_hpages > spool->min_hpages)
262 			spool->rsv_hpages = spool->min_hpages;
263 	}
264 
265 	/*
266 	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
267 	 * quota reference, free it now.
268 	 */
269 	unlock_or_release_subpool(spool, flags);
270 
271 	return ret;
272 }
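
/*
 * Worked example (illustrative numbers only): with max_hpages == -1,
 * min_hpages == 4 and rsv_hpages == 0, a call
 *
 *	hugepage_subpool_put_pages(spool, 3);
 *
 * retains all 3 pages as subpool reserves (rsv_hpages becomes 3) and
 * returns 0; a second identical call caps rsv_hpages at min_hpages (4)
 * and returns 2, the number of global reservations to drop.
 */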
273 
274 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
275 {
276 	return subpool_inode(file_inode(vma->vm_file));
277 }
278 
279 /*
280  * hugetlb vma_lock helper routines
281  */
282 void hugetlb_vma_lock_read(struct vm_area_struct *vma)
283 {
284 	if (__vma_shareable_lock(vma)) {
285 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
286 
287 		down_read(&vma_lock->rw_sema);
288 	} else if (__vma_private_lock(vma)) {
289 		struct resv_map *resv_map = vma_resv_map(vma);
290 
291 		down_read(&resv_map->rw_sema);
292 	}
293 }
294 
295 void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
296 {
297 	if (__vma_shareable_lock(vma)) {
298 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
299 
300 		up_read(&vma_lock->rw_sema);
301 	} else if (__vma_private_lock(vma)) {
302 		struct resv_map *resv_map = vma_resv_map(vma);
303 
304 		up_read(&resv_map->rw_sema);
305 	}
306 }
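
/*
 * Sketch of typical usage of the read-lock helpers above (simplified,
 * hypothetical call site) from a fault-style path:
 *
 *	hugetlb_vma_lock_read(vma);
 *	... walk or instantiate huge page table entries; pmd unsharing
 *	    and truncation are excluded for the duration ...
 *	hugetlb_vma_unlock_read(vma);
 */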
307 
308 void hugetlb_vma_lock_write(struct vm_area_struct *vma)
309 {
310 	if (__vma_shareable_lock(vma)) {
311 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
312 
313 		down_write(&vma_lock->rw_sema);
314 	} else if (__vma_private_lock(vma)) {
315 		struct resv_map *resv_map = vma_resv_map(vma);
316 
317 		down_write(&resv_map->rw_sema);
318 	}
319 }
320 
321 void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
322 {
323 	if (__vma_shareable_lock(vma)) {
324 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
325 
326 		up_write(&vma_lock->rw_sema);
327 	} else if (__vma_private_lock(vma)) {
328 		struct resv_map *resv_map = vma_resv_map(vma);
329 
330 		up_write(&resv_map->rw_sema);
331 	}
332 }
333 
334 int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
335 {
337 	if (__vma_shareable_lock(vma)) {
338 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
339 
340 		return down_write_trylock(&vma_lock->rw_sema);
341 	} else if (__vma_private_lock(vma)) {
342 		struct resv_map *resv_map = vma_resv_map(vma);
343 
344 		return down_write_trylock(&resv_map->rw_sema);
345 	}
346 
347 	return 1;
348 }
349 
350 void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
351 {
352 	if (__vma_shareable_lock(vma)) {
353 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
354 
355 		lockdep_assert_held(&vma_lock->rw_sema);
356 	} else if (__vma_private_lock(vma)) {
357 		struct resv_map *resv_map = vma_resv_map(vma);
358 
359 		lockdep_assert_held(&resv_map->rw_sema);
360 	}
361 }
362 
363 void hugetlb_vma_lock_release(struct kref *kref)
364 {
365 	struct hugetlb_vma_lock *vma_lock = container_of(kref,
366 			struct hugetlb_vma_lock, refs);
367 
368 	kfree(vma_lock);
369 }
370 
371 static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
372 {
373 	struct vm_area_struct *vma = vma_lock->vma;
374 
375 	/*
376 	 * The vma_lock structure may or may not be released by the put, but
377 	 * it will certainly no longer be attached to the vma, so clear the
378 	 * pointer.  The semaphore synchronizes access to the vma_lock->vma field.
379 	 */
380 	vma_lock->vma = NULL;
381 	vma->vm_private_data = NULL;
382 	up_write(&vma_lock->rw_sema);
383 	kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
384 }
385 
386 static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
387 {
388 	if (__vma_shareable_lock(vma)) {
389 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
390 
391 		__hugetlb_vma_unlock_write_put(vma_lock);
392 	} else if (__vma_private_lock(vma)) {
393 		struct resv_map *resv_map = vma_resv_map(vma);
394 
395 		/* no free for anon vmas, but still need to unlock */
396 		up_write(&resv_map->rw_sema);
397 	}
398 }
399 
400 static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
401 {
402 	/*
403 	 * Only present in sharable vmas.
404 	 */
405 	if (!vma || !__vma_shareable_lock(vma))
406 		return;
407 
408 	if (vma->vm_private_data) {
409 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
410 
411 		down_write(&vma_lock->rw_sema);
412 		__hugetlb_vma_unlock_write_put(vma_lock);
413 	}
414 }
415 
416 /*
417  * vma specific semaphore used for pmd sharing and fault/truncation
418  * synchronization
419  */
420 int hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
421 {
422 	struct hugetlb_vma_lock *vma_lock;
423 
424 	/* Only establish in shareable (VM_MAYSHARE) vmas */
425 	if (!vma || !(vma->vm_flags & VM_MAYSHARE))
426 		return 0;
427 
428 	/* Should never get here with non-NULL vm_private_data */
429 	if (vma->vm_private_data)
430 		return -EINVAL;
431 
432 	vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
433 	if (!vma_lock) {
434 		/*
435 		 * If we cannot allocate the structure, then the vma cannot
436 		 * participate in pmd sharing.  That is only a potential
437 		 * performance enhancement and memory saving.
438 		 * However, the lock is also used to synchronize page
439 		 * faults with truncation.  If the lock is not present,
440 		 * unlikely races could leave pages in a file past i_size
441 		 * until the file is removed.  Warn in the unlikely case of
442 		 * allocation failure.
443 		 */
444 		pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
445 		return -EINVAL;
446 	}
447 
448 	kref_init(&vma_lock->refs);
449 	init_rwsem(&vma_lock->rw_sema);
450 	vma_lock->vma = vma;
451 	vma->vm_private_data = vma_lock;
452 
453 	return 0;
454 }
455 
456 /* Helper that removes a struct file_region from the resv_map cache and returns
457  * it for use.
458  */
459 static struct file_region *
460 get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
461 {
462 	struct file_region *nrg;
463 
464 	VM_BUG_ON(resv->region_cache_count <= 0);
465 
466 	resv->region_cache_count--;
467 	nrg = list_first_entry(&resv->region_cache, struct file_region, link);
468 	list_del(&nrg->link);
469 
470 	nrg->from = from;
471 	nrg->to = to;
472 
473 	return nrg;
474 }
475 
476 static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
477 					      struct file_region *rg)
478 {
479 #ifdef CONFIG_CGROUP_HUGETLB
480 	nrg->reservation_counter = rg->reservation_counter;
481 	nrg->css = rg->css;
482 	if (rg->css)
483 		css_get(rg->css);
484 #endif
485 }
486 
487 /* Helper that records hugetlb_cgroup uncharge info. */
488 static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
489 						struct hstate *h,
490 						struct resv_map *resv,
491 						struct file_region *nrg)
492 {
493 #ifdef CONFIG_CGROUP_HUGETLB
494 	if (h_cg) {
495 		nrg->reservation_counter =
496 			&h_cg->rsvd_hugepage[hstate_index(h)];
497 		nrg->css = &h_cg->css;
498 		/*
499 		 * The caller will hold exactly one h_cg->css reference for the
500 		 * whole contiguous reservation region. But this area might be
501 		 * scattered when some file_regions already reside in it. As a
502 		 * result, many file_regions may share only one css reference.
503 		 * In order to ensure that each file_region holds exactly one
504 		 * h_cg->css reference, we should do a css_get for each
505 		 * file_region and leave the reference held by the caller
506 		 * untouched.
507 		 */
508 		css_get(&h_cg->css);
509 		if (!resv->pages_per_hpage)
510 			resv->pages_per_hpage = pages_per_huge_page(h);
511 		/* pages_per_hpage should be the same for all entries in
512 		 * a resv_map.
513 		 */
514 		VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
515 	} else {
516 		nrg->reservation_counter = NULL;
517 		nrg->css = NULL;
518 	}
519 #endif
520 }
521 
522 static void put_uncharge_info(struct file_region *rg)
523 {
524 #ifdef CONFIG_CGROUP_HUGETLB
525 	if (rg->css)
526 		css_put(rg->css);
527 #endif
528 }
529 
530 static bool has_same_uncharge_info(struct file_region *rg,
531 				   struct file_region *org)
532 {
533 #ifdef CONFIG_CGROUP_HUGETLB
534 	return rg->reservation_counter == org->reservation_counter &&
535 	       rg->css == org->css;
536 
537 #else
538 	return true;
539 #endif
540 }
541 
542 static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
543 {
544 	struct file_region *nrg, *prg;
545 
546 	prg = list_prev_entry(rg, link);
547 	if (&prg->link != &resv->regions && prg->to == rg->from &&
548 	    has_same_uncharge_info(prg, rg)) {
549 		prg->to = rg->to;
550 
551 		list_del(&rg->link);
552 		put_uncharge_info(rg);
553 		kfree(rg);
554 
555 		rg = prg;
556 	}
557 
558 	nrg = list_next_entry(rg, link);
559 	if (&nrg->link != &resv->regions && nrg->from == rg->to &&
560 	    has_same_uncharge_info(nrg, rg)) {
561 		nrg->from = rg->from;
562 
563 		list_del(&rg->link);
564 		put_uncharge_info(rg);
565 		kfree(rg);
566 	}
567 }
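
/*
 * Example (illustrative): if the map already holds [0, 2) and a new
 * region [2, 5) with identical uncharge info has just been linked in,
 * coalescing extends the earlier entry to [0, 5) and frees the now
 * redundant descriptor.
 */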
568 
569 static inline long
570 hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from,
571 		     long to, struct hstate *h, struct hugetlb_cgroup *cg,
572 		     long *regions_needed)
573 {
574 	struct file_region *nrg;
575 
576 	if (!regions_needed) {
577 		nrg = get_file_region_entry_from_cache(map, from, to);
578 		record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
579 		list_add(&nrg->link, rg);
580 		coalesce_file_region(map, nrg);
581 	} else {
582 		*regions_needed += 1;
583 	}
584 
585 	return to - from;
586 }
587 
588 /*
589  * Must be called with resv->lock held.
590  *
591  * Calling this with regions_needed != NULL will count the number of pages
592  * to be added but will not modify the linked list. regions_needed will then
593  * indicate the number of file_regions needed in the cache to carry out the
594  * addition of regions for this range.
595  */
596 static long add_reservation_in_range(struct resv_map *resv, long f, long t,
597 				     struct hugetlb_cgroup *h_cg,
598 				     struct hstate *h, long *regions_needed)
599 {
600 	long add = 0;
601 	struct list_head *head = &resv->regions;
602 	long last_accounted_offset = f;
603 	struct file_region *iter, *trg = NULL;
604 	struct list_head *rg = NULL;
605 
606 	if (regions_needed)
607 		*regions_needed = 0;
608 
609 	/* In this loop, we essentially handle an entry for the range
610 	 * [last_accounted_offset, iter->from), at every iteration, with some
611 	 * bounds checking.
612 	 */
613 	list_for_each_entry_safe(iter, trg, head, link) {
614 		/* Skip irrelevant regions that start before our range. */
615 		if (iter->from < f) {
616 			/* If this region ends after the last accounted offset,
617 			 * then we need to update last_accounted_offset.
618 			 */
619 			if (iter->to > last_accounted_offset)
620 				last_accounted_offset = iter->to;
621 			continue;
622 		}
623 
624 		/* When we find a region that starts beyond our range, we've
625 		 * finished.
626 		 */
627 		if (iter->from >= t) {
628 			rg = iter->link.prev;
629 			break;
630 		}
631 
632 		/* Add an entry for last_accounted_offset -> iter->from, and
633 		 * update last_accounted_offset.
634 		 */
635 		if (iter->from > last_accounted_offset)
636 			add += hugetlb_resv_map_add(resv, iter->link.prev,
637 						    last_accounted_offset,
638 						    iter->from, h, h_cg,
639 						    regions_needed);
640 
641 		last_accounted_offset = iter->to;
642 	}
643 
644 	/* Handle the case where our range extends beyond
645 	 * last_accounted_offset.
646 	 */
647 	if (!rg)
648 		rg = head->prev;
649 	if (last_accounted_offset < t)
650 		add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
651 					    t, h, h_cg, regions_needed);
652 
653 	return add;
654 }
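
/*
 * Example (illustrative): with a single existing file_region [3, 5), a
 * call for the range [0, 10) adds entries for [0, 3) and [5, 10) and
 * returns 8.  In counting mode (regions_needed != NULL) the same call
 * leaves the map untouched and reports that 2 cache entries are needed.
 */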
655 
656 /* Must be called with resv->lock acquired. Will drop lock to allocate entries.
657  */
658 static int allocate_file_region_entries(struct resv_map *resv,
659 					int regions_needed)
660 	__must_hold(&resv->lock)
661 {
662 	LIST_HEAD(allocated_regions);
663 	int to_allocate = 0, i = 0;
664 	struct file_region *trg = NULL, *rg = NULL;
665 
666 	VM_BUG_ON(regions_needed < 0);
667 
668 	/*
669 	 * Check for sufficient descriptors in the cache to accommodate
670 	 * the number of in progress add operations plus regions_needed.
671 	 *
672 	 * This is a while loop because when we drop the lock, some other call
673 	 * to region_add or region_del may have consumed some region_entries,
674 	 * so we keep looping here until we finally have enough entries for
675 	 * (adds_in_progress + regions_needed).
676 	 */
677 	while (resv->region_cache_count <
678 	       (resv->adds_in_progress + regions_needed)) {
679 		to_allocate = resv->adds_in_progress + regions_needed -
680 			      resv->region_cache_count;
681 
682 		/* At this point, we should have enough entries in the cache
683 		 * for all the existing adds_in_progress. We should only be
684 		 * needing to allocate for regions_needed.
685 		 */
686 		VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);
687 
688 		spin_unlock(&resv->lock);
689 		for (i = 0; i < to_allocate; i++) {
690 			trg = kmalloc(sizeof(*trg), GFP_KERNEL);
691 			if (!trg)
692 				goto out_of_memory;
693 			list_add(&trg->link, &allocated_regions);
694 		}
695 
696 		spin_lock(&resv->lock);
697 
698 		list_splice(&allocated_regions, &resv->region_cache);
699 		resv->region_cache_count += to_allocate;
700 	}
701 
702 	return 0;
703 
704 out_of_memory:
705 	list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
706 		list_del(&rg->link);
707 		kfree(rg);
708 	}
709 	return -ENOMEM;
710 }
711 
712 /*
713  * Add the huge page range represented by [f, t) to the reserve
714  * map.  Regions will be taken from the cache to fill in this range.
715  * Sufficient regions should exist in the cache due to the previous
716  * call to region_chg with the same range, but in some cases the cache will not
717  * have sufficient entries due to races with other code doing region_add or
718  * region_del.  The extra needed entries will be allocated.
719  *
720  * regions_needed is the out value provided by a previous call to region_chg.
721  *
722  * Return the number of new huge pages added to the map.  This number is greater
723  * than or equal to zero.  If file_region entries needed to be allocated for
724  * this operation and they could not be allocated, -ENOMEM is returned.
725  * region_add of regions of length 1 never allocates file_regions and cannot
726  * fail; region_chg will always allocate at least 1 entry and a region_add for
727  * 1 page will only require at most 1 entry.
728  */
729 static long region_add(struct resv_map *resv, long f, long t,
730 		       long in_regions_needed, struct hstate *h,
731 		       struct hugetlb_cgroup *h_cg)
732 {
733 	long add = 0, actual_regions_needed = 0;
734 
735 	spin_lock(&resv->lock);
736 retry:
737 
738 	/* Count how many regions are actually needed to execute this add. */
739 	add_reservation_in_range(resv, f, t, NULL, NULL,
740 				 &actual_regions_needed);
741 
742 	/*
743 	 * Check for sufficient descriptors in the cache to accommodate
744 	 * this add operation. Note that actual_regions_needed may be greater
745 	 * than in_regions_needed, as the resv_map may have been modified since
746 	 * the region_chg call. In this case, we need to make sure that we
747 	 * allocate extra entries, such that we have enough for all the
748 	 * existing adds_in_progress, plus the excess needed for this
749 	 * operation.
750 	 */
751 	if (actual_regions_needed > in_regions_needed &&
752 	    resv->region_cache_count <
753 		    resv->adds_in_progress +
754 			    (actual_regions_needed - in_regions_needed)) {
755 		/* region_add operation of range 1 should never need to
756 		 * allocate file_region entries.
757 		 */
758 		VM_BUG_ON(t - f <= 1);
759 
760 		if (allocate_file_region_entries(
761 			    resv, actual_regions_needed - in_regions_needed)) {
762 			return -ENOMEM;
763 		}
764 
765 		goto retry;
766 	}
767 
768 	add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);
769 
770 	resv->adds_in_progress -= in_regions_needed;
771 
772 	spin_unlock(&resv->lock);
773 	return add;
774 }
775 
776 /*
777  * Examine the existing reserve map and determine how many
778  * huge pages in the specified range [f, t) are NOT currently
779  * represented.  This routine is called before a subsequent
780  * call to region_add that will actually modify the reserve
781  * map to add the specified range [f, t).  region_chg does
782  * not change the number of huge pages represented by the
783  * map.  A number of new file_region structures is added to the cache as a
784  * placeholder, for the subsequent region_add call to use. At least 1
785  * file_region structure is added.
786  *
787  * out_regions_needed is the number of regions added to the
788  * resv->adds_in_progress.  This value needs to be provided to a follow up call
789  * to region_add or region_abort for proper accounting.
790  *
791  * Returns the number of huge pages that need to be added to the existing
792  * reservation map for the range [f, t).  This number is greater than or equal to
793  * zero.  -ENOMEM is returned if a new file_region structure or cache entry
794  * is needed and can not be allocated.
795  */
796 static long region_chg(struct resv_map *resv, long f, long t,
797 		       long *out_regions_needed)
798 {
799 	long chg = 0;
800 
801 	spin_lock(&resv->lock);
802 
803 	/* Count how many hugepages in this range are NOT represented. */
804 	chg = add_reservation_in_range(resv, f, t, NULL, NULL,
805 				       out_regions_needed);
806 
807 	if (*out_regions_needed == 0)
808 		*out_regions_needed = 1;
809 
810 	if (allocate_file_region_entries(resv, *out_regions_needed))
811 		return -ENOMEM;
812 
813 	resv->adds_in_progress += *out_regions_needed;
814 
815 	spin_unlock(&resv->lock);
816 	return chg;
817 }
818 
819 /*
820  * Abort the in progress add operation.  The adds_in_progress field
821  * of the resv_map keeps track of the operations in progress between
822  * calls to region_chg and region_add.  Operations are sometimes
823  * aborted after the call to region_chg.  In such cases, region_abort
824  * is called to decrement the adds_in_progress counter. regions_needed
825  * is the value returned by the region_chg call; it is used to decrement
826  * the adds_in_progress counter.
827  *
828  * NOTE: The range arguments [f, t) are not needed or used in this
829  * routine.  They are kept to make reading the calling code easier as
830  * arguments will match the associated region_chg call.
831  */
832 static void region_abort(struct resv_map *resv, long f, long t,
833 			 long regions_needed)
834 {
835 	spin_lock(&resv->lock);
836 	VM_BUG_ON(!resv->region_cache_count);
837 	resv->adds_in_progress -= regions_needed;
838 	spin_unlock(&resv->lock);
839 }
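
/*
 * Sketch of the region_chg/region_add/region_abort protocol (simplified;
 * the real caller is hugetlb_reserve_pages(), not shown here, and
 * "other_setup_failed" is a stand-in for whatever work may fail between
 * the two steps):
 *
 *	long regions_needed, chg;
 *
 *	chg = region_chg(resv, from, to, &regions_needed);
 *	if (chg < 0)
 *		return chg;
 *	if (other_setup_failed) {
 *		region_abort(resv, from, to, regions_needed);
 *		return -ENOMEM;
 *	}
 *	region_add(resv, from, to, regions_needed, h, h_cg);
 */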
840 
841 /*
842  * Delete the specified range [f, t) from the reserve map.  If the
843  * t parameter is LONG_MAX, this indicates that ALL regions after f
844  * should be deleted.  Locate the regions which intersect [f, t)
845  * and either trim, delete or split the existing regions.
846  *
847  * Returns the number of huge pages deleted from the reserve map.
848  * In the normal case, the return value is zero or more.  In the
849  * case where a region must be split, a new region descriptor must
850  * be allocated.  If the allocation fails, -ENOMEM will be returned.
851  * NOTE: If the parameter t == LONG_MAX, then we will never split
852  * a region and possibly return -ENOMEM.  Callers specifying
853  * t == LONG_MAX do not need to check for -ENOMEM error.
854  */
855 static long region_del(struct resv_map *resv, long f, long t)
856 {
857 	struct list_head *head = &resv->regions;
858 	struct file_region *rg, *trg;
859 	struct file_region *nrg = NULL;
860 	long del = 0;
861 
862 retry:
863 	spin_lock(&resv->lock);
864 	list_for_each_entry_safe(rg, trg, head, link) {
865 		/*
866 		 * Skip regions before the range to be deleted.  file_region
867 		 * ranges are normally of the form [from, to).  However, there
868 		 * may be a "placeholder" entry in the map which is of the form
869 		 * (from, to) with from == to.  Check for placeholder entries
870 		 * at the beginning of the range to be deleted.
871 		 */
872 		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
873 			continue;
874 
875 		if (rg->from >= t)
876 			break;
877 
878 		if (f > rg->from && t < rg->to) { /* Must split region */
879 			/*
880 			 * Check for an entry in the cache before dropping
881 			 * lock and attempting allocation.
882 			 */
883 			if (!nrg &&
884 			    resv->region_cache_count > resv->adds_in_progress) {
885 				nrg = list_first_entry(&resv->region_cache,
886 							struct file_region,
887 							link);
888 				list_del(&nrg->link);
889 				resv->region_cache_count--;
890 			}
891 
892 			if (!nrg) {
893 				spin_unlock(&resv->lock);
894 				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
895 				if (!nrg)
896 					return -ENOMEM;
897 				goto retry;
898 			}
899 
900 			del += t - f;
901 			hugetlb_cgroup_uncharge_file_region(
902 				resv, rg, t - f, false);
903 
904 			/* New entry for end of split region */
905 			nrg->from = t;
906 			nrg->to = rg->to;
907 
908 			copy_hugetlb_cgroup_uncharge_info(nrg, rg);
909 
910 			INIT_LIST_HEAD(&nrg->link);
911 
912 			/* Original entry is trimmed */
913 			rg->to = f;
914 
915 			list_add(&nrg->link, &rg->link);
916 			nrg = NULL;
917 			break;
918 		}
919 
920 		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
921 			del += rg->to - rg->from;
922 			hugetlb_cgroup_uncharge_file_region(resv, rg,
923 							    rg->to - rg->from, true);
924 			list_del(&rg->link);
925 			kfree(rg);
926 			continue;
927 		}
928 
929 		if (f <= rg->from) {	/* Trim beginning of region */
930 			hugetlb_cgroup_uncharge_file_region(resv, rg,
931 							    t - rg->from, false);
932 
933 			del += t - rg->from;
934 			rg->from = t;
935 		} else {		/* Trim end of region */
936 			hugetlb_cgroup_uncharge_file_region(resv, rg,
937 							    rg->to - f, false);
938 
939 			del += rg->to - f;
940 			rg->to = f;
941 		}
942 	}
943 
944 	spin_unlock(&resv->lock);
945 	kfree(nrg);
946 	return del;
947 }
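
/*
 * Example (illustrative): deleting [2, 5) from a map holding the single
 * region [0, 10) hits the "must split" case above: the original entry is
 * trimmed to [0, 2), a new entry [5, 10) is inserted after it, and 3 is
 * returned as the number of pages deleted.
 */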
948 
949 /*
950  * A rare out of memory error was encountered which prevented removal of
951  * the reserve map region for a page.  The huge page itself was freed
952  * and removed from the page cache.  This routine will adjust the subpool
953  * usage count, and the global reserve count if needed.  By incrementing
954  * these counts, the reserve map entry which could not be deleted will
955  * appear as a "reserved" entry instead of simply dangling with incorrect
956  * counts.
957  */
958 void hugetlb_fix_reserve_counts(struct inode *inode)
959 {
960 	struct hugepage_subpool *spool = subpool_inode(inode);
961 	long rsv_adjust;
962 	bool reserved = false;
963 
964 	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
965 	if (rsv_adjust > 0) {
966 		struct hstate *h = hstate_inode(inode);
967 
968 		if (!hugetlb_acct_memory(h, 1))
969 			reserved = true;
970 	} else if (!rsv_adjust) {
971 		reserved = true;
972 	}
973 
974 	if (!reserved)
975 		pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
976 }
977 
978 /*
979  * Count and return the number of huge pages in the reserve map
980  * that intersect with the range [f, t).
981  */
982 static long region_count(struct resv_map *resv, long f, long t)
983 {
984 	struct list_head *head = &resv->regions;
985 	struct file_region *rg;
986 	long chg = 0;
987 
988 	spin_lock(&resv->lock);
989 	/* Locate each segment we overlap with, and count that overlap. */
990 	list_for_each_entry(rg, head, link) {
991 		long seg_from;
992 		long seg_to;
993 
994 		if (rg->to <= f)
995 			continue;
996 		if (rg->from >= t)
997 			break;
998 
999 		seg_from = max(rg->from, f);
1000 		seg_to = min(rg->to, t);
1001 
1002 		chg += seg_to - seg_from;
1003 	}
1004 	spin_unlock(&resv->lock);
1005 
1006 	return chg;
1007 }
1008 
1009 /*
1010  * Convert the address within this vma to the page offset within
1011  * the mapping, huge page units here.
1012  */
1013 static pgoff_t vma_hugecache_offset(struct hstate *h,
1014 			struct vm_area_struct *vma, unsigned long address)
1015 {
1016 	return ((address - vma->vm_start) >> huge_page_shift(h)) +
1017 			(vma->vm_pgoff >> huge_page_order(h));
1018 }
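
/*
 * Example (illustrative, assuming x86_64-style 2MB huge pages and 4KB
 * base pages): for a vma starting at 0x40000000 with vm_pgoff == 0,
 * address 0x40400000 yields ((0x40400000 - 0x40000000) >> 21) + 0 == 2,
 * i.e. the third huge page of the mapping.
 */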
1019 
1020 /**
1021  * vma_kernel_pagesize - Page size granularity for this VMA.
1022  * @vma: The user mapping.
1023  *
1024  * Folios in this VMA will be aligned to, and at least as large as, the
1025  * number of bytes returned by this function.
1026  *
1027  * Return: The default size of the folios allocated when backing a VMA.
1028  */
1029 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
1030 {
1031 	if (vma->vm_ops && vma->vm_ops->pagesize)
1032 		return vma->vm_ops->pagesize(vma);
1033 	return PAGE_SIZE;
1034 }
1035 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
1036 
1037 /*
1038  * Return the page size being used by the MMU to back a VMA. In the majority
1039  * of cases, the page size used by the kernel matches the MMU size. On
1040  * architectures where it differs, an architecture-specific 'strong'
1041  * version of this symbol is required.
1042  */
1043 __weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
1044 {
1045 	return vma_kernel_pagesize(vma);
1046 }
1047 
1048 /*
1049  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
1050  * bits of the reservation map pointer, which are always clear due to
1051  * alignment.
1052  */
1053 #define HPAGE_RESV_OWNER    (1UL << 0)
1054 #define HPAGE_RESV_UNMAPPED (1UL << 1)
1055 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
1056 
1057 /*
1058  * These helpers are used to track how many pages are reserved for
1059  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
1060  * is guaranteed to have their future faults succeed.
1061  *
1062  * With the exception of hugetlb_dup_vma_private() which is called at fork(),
1063  * the reserve counters are updated with the hugetlb_lock held. It is safe
1064  * to reset the VMA at fork() time as it is not in use yet and there is no
1065  * chance of the global counters getting corrupted as a result.
1066  *
1067  * The private mapping reservation is represented in a subtly different
1068  * manner from a shared mapping.  A shared mapping has a region map
1069  * associated with the underlying file; this region map represents the
1070  * backing file pages which have ever had a reservation assigned, and it
1071  * persists even after the page is instantiated.  A private mapping has a
1072  * region map associated with the original mmap which is attached to all
1073  * VMAs that reference it; this region map represents those offsets which
1074  * have consumed a reservation, i.e. where pages have been instantiated.
1075  */
1076 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
1077 {
1078 	return (unsigned long)vma->vm_private_data;
1079 }
1080 
1081 static void set_vma_private_data(struct vm_area_struct *vma,
1082 							unsigned long value)
1083 {
1084 	vma->vm_private_data = (void *)value;
1085 }
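
/*
 * Sketch (illustrative, simplified from the reservation setup code not
 * shown here): for a MAP_PRIVATE owner vma, the resv_map pointer and the
 * low flag bits share vm_private_data, roughly
 *
 *	set_vma_private_data(vma, (unsigned long)resv_map);
 *	set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
 *
 * and vma_resv_map() later masks the flag bits off again with
 * ~HPAGE_RESV_MASK.
 */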
1086 
1087 static void
1088 resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
1089 					  struct hugetlb_cgroup *h_cg,
1090 					  struct hstate *h)
1091 {
1092 #ifdef CONFIG_CGROUP_HUGETLB
1093 	if (!h_cg || !h) {
1094 		resv_map->reservation_counter = NULL;
1095 		resv_map->pages_per_hpage = 0;
1096 		resv_map->css = NULL;
1097 	} else {
1098 		resv_map->reservation_counter =
1099 			&h_cg->rsvd_hugepage[hstate_index(h)];
1100 		resv_map->pages_per_hpage = pages_per_huge_page(h);
1101 		resv_map->css = &h_cg->css;
1102 	}
1103 #endif
1104 }
1105 
1106 struct resv_map *resv_map_alloc(void)
1107 {
1108 	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
1109 	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
1110 
1111 	if (!resv_map || !rg) {
1112 		kfree(resv_map);
1113 		kfree(rg);
1114 		return NULL;
1115 	}
1116 
1117 	kref_init(&resv_map->refs);
1118 	spin_lock_init(&resv_map->lock);
1119 	INIT_LIST_HEAD(&resv_map->regions);
1120 	init_rwsem(&resv_map->rw_sema);
1121 
1122 	resv_map->adds_in_progress = 0;
1123 	/*
1124 	 * Initialize these to 0. On shared mappings, 0's here indicate these
1125 	 * fields don't do cgroup accounting. On private mappings, these will be
1126 	 * re-initialized to the proper values, to indicate that hugetlb cgroup
1127 	 * reservations are to be uncharged from here.
1128 	 */
1129 	resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);
1130 
1131 	INIT_LIST_HEAD(&resv_map->region_cache);
1132 	list_add(&rg->link, &resv_map->region_cache);
1133 	resv_map->region_cache_count = 1;
1134 
1135 	return resv_map;
1136 }
1137 
1138 void resv_map_release(struct kref *ref)
1139 {
1140 	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
1141 	struct list_head *head = &resv_map->region_cache;
1142 	struct file_region *rg, *trg;
1143 
1144 	/* Clear out any active regions before we release the map. */
1145 	region_del(resv_map, 0, LONG_MAX);
1146 
1147 	/* ... and any entries left in the cache */
1148 	list_for_each_entry_safe(rg, trg, head, link) {
1149 		list_del(&rg->link);
1150 		kfree(rg);
1151 	}
1152 
1153 	VM_BUG_ON(resv_map->adds_in_progress);
1154 
1155 	kfree(resv_map);
1156 }
1157 
1158 static inline struct resv_map *inode_resv_map(struct inode *inode)
1159 {
1160 	/*
1161 	 * At inode evict time, i_mapping may not point to the original
1162 	 * address space within the inode.  This original address space
1163 	 * contains the pointer to the resv_map.  So, always use the
1164 	 * address space embedded within the inode.
1165 	 * The VERY common case is inode->mapping == &inode->i_data but,
1166 	 * this may not be true for device special inodes.
1167 	 */
1168 	return (struct resv_map *)(&inode->i_data)->i_private_data;
1169 }
1170 
1171 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
1172 {
1173 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1174 	if (vma->vm_flags & VM_MAYSHARE) {
1175 		struct address_space *mapping = vma->vm_file->f_mapping;
1176 		struct inode *inode = mapping->host;
1177 
1178 		return inode_resv_map(inode);
1179 
1180 	} else {
1181 		return (struct resv_map *)(get_vma_private_data(vma) &
1182 							~HPAGE_RESV_MASK);
1183 	}
1184 }
1185 
1186 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
1187 {
1188 	VM_WARN_ON_ONCE_VMA(!is_vm_hugetlb_page(vma), vma);
1189 	VM_WARN_ON_ONCE_VMA(vma->vm_flags & VM_MAYSHARE, vma);
1190 
1191 	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
1192 }
1193 
1194 static void set_vma_desc_resv_map(struct vm_area_desc *desc, struct resv_map *map)
1195 {
1196 	VM_WARN_ON_ONCE(!is_vm_hugetlb_flags(desc->vm_flags));
1197 	VM_WARN_ON_ONCE(desc->vm_flags & VM_MAYSHARE);
1198 
1199 	desc->private_data = map;
1200 }
1201 
1202 static void set_vma_desc_resv_flags(struct vm_area_desc *desc, unsigned long flags)
1203 {
1204 	VM_WARN_ON_ONCE(!is_vm_hugetlb_flags(desc->vm_flags));
1205 	VM_WARN_ON_ONCE(desc->vm_flags & VM_MAYSHARE);
1206 
1207 	desc->private_data = (void *)((unsigned long)desc->private_data | flags);
1208 }
1209 
1210 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
1211 {
1212 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1213 
1214 	return (get_vma_private_data(vma) & flag) != 0;
1215 }
1216 
1217 static bool is_vma_desc_resv_set(struct vm_area_desc *desc, unsigned long flag)
1218 {
1219 	VM_WARN_ON_ONCE(!is_vm_hugetlb_flags(desc->vm_flags));
1220 
1221 	return ((unsigned long)desc->private_data) & flag;
1222 }
1223 
1224 bool __vma_private_lock(struct vm_area_struct *vma)
1225 {
1226 	return !(vma->vm_flags & VM_MAYSHARE) &&
1227 		get_vma_private_data(vma) & ~HPAGE_RESV_MASK &&
1228 		is_vma_resv_set(vma, HPAGE_RESV_OWNER);
1229 }
1230 
1231 void hugetlb_dup_vma_private(struct vm_area_struct *vma)
1232 {
1233 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1234 	/*
1235 	 * Clear vm_private_data
1236 	 * - For shared mappings this is a per-vma semaphore that may be
1237 	 *   allocated in a subsequent call to hugetlb_vm_op_open.
1238 	 *   Before clearing, make sure pointer is not associated with vma
1239 	 *   as this will leak the structure.  This is the case when called
1240 	 *   via clear_vma_resv_huge_pages() and hugetlb_vm_op_open has already
1241 	 *   been called to allocate a new structure.
1242 	 * - For MAP_PRIVATE mappings, this is the reserve map which does
1243 	 *   not apply to children.  Faults generated by the children are
1244 	 *   not guaranteed to succeed, even if read-only.
1245 	 */
1246 	if (vma->vm_flags & VM_MAYSHARE) {
1247 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
1248 
1249 		if (vma_lock && vma_lock->vma != vma)
1250 			vma->vm_private_data = NULL;
1251 	} else {
1252 		vma->vm_private_data = NULL;
1253 	}
1254 }
1255 
1256 /*
1257  * Reset and decrement one ref on hugepage private reservation.
1258  * Called with mm->mmap_lock writer semaphore held.
1259  * This function should be only used by mremap and operate on
1260  * same sized vma. It should never come here with last ref on the
1261  * reservation.
1262  */
1263 void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
1264 {
1265 	/*
1266 	 * Clear the old hugetlb private page reservation.
1267 	 * It has already been transferred to new_vma.
1268 	 *
1269 	 * During a mremap() operation of a hugetlb vma we call move_vma()
1270 	 * which copies vma into new_vma and unmaps vma. After the copy
1271 	 * operation both new_vma and vma share a reference to the resv_map
1272 	 * struct, and at that point vma is about to be unmapped. We don't
1273 	 * want to return the reservation to the pool at unmap of vma because
1274 	 * the reservation still lives on in new_vma, so simply decrement the
1275 	 * ref here and remove the resv_map reference from this vma.
1276 	 */
1277 	struct resv_map *reservations = vma_resv_map(vma);
1278 
1279 	if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1280 		resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
1281 		kref_put(&reservations->refs, resv_map_release);
1282 	}
1283 
1284 	hugetlb_dup_vma_private(vma);
1285 }
1286 
1287 static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
1288 {
1289 	int nid = folio_nid(folio);
1290 
1291 	lockdep_assert_held(&hugetlb_lock);
1292 	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
1293 
1294 	list_move(&folio->lru, &h->hugepage_freelists[nid]);
1295 	h->free_huge_pages++;
1296 	h->free_huge_pages_node[nid]++;
1297 	folio_set_hugetlb_freed(folio);
1298 }
1299 
1300 static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h,
1301 								int nid)
1302 {
1303 	struct folio *folio;
1304 	bool pin = !!(current->flags & PF_MEMALLOC_PIN);
1305 
1306 	lockdep_assert_held(&hugetlb_lock);
1307 	list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) {
1308 		if (pin && !folio_is_longterm_pinnable(folio))
1309 			continue;
1310 
1311 		if (folio_test_hwpoison(folio))
1312 			continue;
1313 
1314 		if (is_migrate_isolate_page(&folio->page))
1315 			continue;
1316 
1317 		list_move(&folio->lru, &h->hugepage_activelist);
1318 		folio_ref_unfreeze(folio, 1);
1319 		folio_clear_hugetlb_freed(folio);
1320 		h->free_huge_pages--;
1321 		h->free_huge_pages_node[nid]--;
1322 		return folio;
1323 	}
1324 
1325 	return NULL;
1326 }
1327 
1328 static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask,
1329 							int nid, nodemask_t *nmask)
1330 {
1331 	unsigned int cpuset_mems_cookie;
1332 	struct zonelist *zonelist;
1333 	struct zone *zone;
1334 	struct zoneref *z;
1335 	int node = NUMA_NO_NODE;
1336 
1337 	/* 'nid' should not be NUMA_NO_NODE. Try to catch any misuse of it and rectify. */
1338 	if (nid == NUMA_NO_NODE)
1339 		nid = numa_node_id();
1340 
1341 	zonelist = node_zonelist(nid, gfp_mask);
1342 
1343 retry_cpuset:
1344 	cpuset_mems_cookie = read_mems_allowed_begin();
1345 	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
1346 		struct folio *folio;
1347 
1348 		if (!cpuset_zone_allowed(zone, gfp_mask))
1349 			continue;
1350 		/*
1351 		 * No need to ask again on the same node; the pool is node rather
1352 		 * than zone aware.
1353 		 */
1354 		if (zone_to_nid(zone) == node)
1355 			continue;
1356 		node = zone_to_nid(zone);
1357 
1358 		folio = dequeue_hugetlb_folio_node_exact(h, node);
1359 		if (folio)
1360 			return folio;
1361 	}
1362 	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
1363 		goto retry_cpuset;
1364 
1365 	return NULL;
1366 }
1367 
1368 static unsigned long available_huge_pages(struct hstate *h)
1369 {
1370 	return h->free_huge_pages - h->resv_huge_pages;
1371 }
1372 
1373 static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
1374 				struct vm_area_struct *vma,
1375 				unsigned long address, long gbl_chg)
1376 {
1377 	struct folio *folio = NULL;
1378 	struct mempolicy *mpol;
1379 	gfp_t gfp_mask;
1380 	nodemask_t *nodemask;
1381 	int nid;
1382 
1383 	/*
1384 	 * gbl_chg==1 means the allocation requires a new page that was not
1385 	 * reserved before.  Make sure there is at least one free page.
1386 	 */
1387 	if (gbl_chg && !available_huge_pages(h))
1388 		goto err;
1389 
1390 	gfp_mask = htlb_alloc_mask(h);
1391 	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1392 
1393 	if (mpol_is_preferred_many(mpol)) {
1394 		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
1395 							nid, nodemask);
1396 
1397 		/* Fall back to all nodes if folio == NULL */
1398 		nodemask = NULL;
1399 	}
1400 
1401 	if (!folio)
1402 		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
1403 							nid, nodemask);
1404 
1405 	mpol_cond_put(mpol);
1406 	return folio;
1407 
1408 err:
1409 	return NULL;
1410 }
1411 
1412 #if defined(CONFIG_ARCH_HAS_GIGANTIC_PAGE) && defined(CONFIG_CONTIG_ALLOC)
1413 static struct folio *alloc_gigantic_frozen_folio(int order, gfp_t gfp_mask,
1414 		int nid, nodemask_t *nodemask)
1415 {
1416 	struct folio *folio;
1417 
1418 	folio = hugetlb_cma_alloc_frozen_folio(order, gfp_mask, nid, nodemask);
1419 	if (folio)
1420 		return folio;
1421 
1422 	if (hugetlb_cma_exclusive_alloc())
1423 		return NULL;
1424 
1425 	folio = (struct folio *)alloc_contig_frozen_pages(1 << order, gfp_mask,
1426 							  nid, nodemask);
1427 	return folio;
1428 }
1429 #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE || !CONFIG_CONTIG_ALLOC */
1430 static struct folio *alloc_gigantic_frozen_folio(int order, gfp_t gfp_mask, int nid,
1431 					  nodemask_t *nodemask)
1432 {
1433 	return NULL;
1434 }
1435 #endif
1436 
1437 /*
1438  * Remove hugetlb folio from lists.
1439  * If vmemmap exists for the folio, clear the hugetlb flag so that the
1440  * folio appears as just a compound page.  Otherwise, wait until after
1441  * allocating vmemmap to clear the flag.
1442  *
1443  * Must be called with hugetlb lock held.
1444  */
1445 void remove_hugetlb_folio(struct hstate *h, struct folio *folio,
1446 			  bool adjust_surplus)
1447 {
1448 	int nid = folio_nid(folio);
1449 
1450 	VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio(folio), folio);
1451 	VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio_rsvd(folio), folio);
1452 
1453 	lockdep_assert_held(&hugetlb_lock);
1454 	if (hstate_is_gigantic_no_runtime(h))
1455 		return;
1456 
1457 	list_del(&folio->lru);
1458 
1459 	if (folio_test_hugetlb_freed(folio)) {
1460 		folio_clear_hugetlb_freed(folio);
1461 		h->free_huge_pages--;
1462 		h->free_huge_pages_node[nid]--;
1463 	}
1464 	if (adjust_surplus) {
1465 		h->surplus_huge_pages--;
1466 		h->surplus_huge_pages_node[nid]--;
1467 	}
1468 
1469 	/*
1470 	 * We can only clear the hugetlb flag after allocating vmemmap
1471 	 * pages.  Otherwise, someone (memory error handling) may try to write
1472 	 * to tail struct pages.
1473 	 */
1474 	if (!folio_test_hugetlb_vmemmap_optimized(folio))
1475 		__folio_clear_hugetlb(folio);
1476 
1477 	h->nr_huge_pages--;
1478 	h->nr_huge_pages_node[nid]--;
1479 }
1480 
1481 void add_hugetlb_folio(struct hstate *h, struct folio *folio,
1482 		       bool adjust_surplus)
1483 {
1484 	int nid = folio_nid(folio);
1485 
1486 	VM_BUG_ON_FOLIO(!folio_test_hugetlb_vmemmap_optimized(folio), folio);
1487 
1488 	lockdep_assert_held(&hugetlb_lock);
1489 
1490 	INIT_LIST_HEAD(&folio->lru);
1491 	h->nr_huge_pages++;
1492 	h->nr_huge_pages_node[nid]++;
1493 
1494 	if (adjust_surplus) {
1495 		h->surplus_huge_pages++;
1496 		h->surplus_huge_pages_node[nid]++;
1497 	}
1498 
1499 	__folio_set_hugetlb(folio);
1500 	folio_change_private(folio, NULL);
1501 	/*
1502 	 * We have to set hugetlb_vmemmap_optimized again as above
1503 	 * folio_change_private(folio, NULL) cleared it.
1504 	 */
1505 	folio_set_hugetlb_vmemmap_optimized(folio);
1506 
1507 	arch_clear_hugetlb_flags(folio);
1508 	enqueue_hugetlb_folio(h, folio);
1509 }
1510 
1511 static void __update_and_free_hugetlb_folio(struct hstate *h,
1512 						struct folio *folio)
1513 {
1514 	bool clear_flag = folio_test_hugetlb_vmemmap_optimized(folio);
1515 
1516 	if (hstate_is_gigantic_no_runtime(h))
1517 		return;
1518 
1519 	/*
1520 	 * If we don't know which subpages are hwpoisoned, we can't free
1521 	 * the hugepage, so it's leaked intentionally.
1522 	 */
1523 	if (folio_test_hugetlb_raw_hwp_unreliable(folio))
1524 		return;
1525 
1526 	/*
1527 	 * If folio is not vmemmap optimized (!clear_flag), then the folio
1528 	 * is no longer identified as a hugetlb page.  hugetlb_vmemmap_restore_folio
1529 	 * can only be passed hugetlb pages and will BUG otherwise.
1530 	 */
1531 	if (clear_flag && hugetlb_vmemmap_restore_folio(h, folio)) {
1532 		spin_lock_irq(&hugetlb_lock);
1533 		/*
1534 		 * If we cannot allocate vmemmap pages, just refuse to free the
1535 		 * page; put it back on the hugetlb free list and treat it as a
1536 		 * surplus page.
1537 		 */
1538 		add_hugetlb_folio(h, folio, true);
1539 		spin_unlock_irq(&hugetlb_lock);
1540 		return;
1541 	}
1542 
1543 	/*
1544 	 * If vmemmap pages were allocated above, then we need to clear the
1545 	 * hugetlb flag under the hugetlb lock.
1546 	 */
1547 	if (folio_test_hugetlb(folio)) {
1548 		spin_lock_irq(&hugetlb_lock);
1549 		__folio_clear_hugetlb(folio);
1550 		spin_unlock_irq(&hugetlb_lock);
1551 	}
1552 
1553 	/*
1554 	 * Move PageHWPoison flag from head page to the raw error pages,
1555 	 * which makes any healthy subpages reusable.
1556 	 */
1557 	if (unlikely(folio_test_hwpoison(folio)))
1558 		folio_clear_hugetlb_hwpoison(folio);
1559 
1560 	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
1561 	if (folio_test_hugetlb_cma(folio))
1562 		hugetlb_cma_free_frozen_folio(folio);
1563 	else
1564 		free_frozen_pages(&folio->page, folio_order(folio));
1565 }
1566 
1567 /*
1568  * Since update_and_free_hugetlb_folio() can be called from any context, we cannot
1569  * use GFP_KERNEL to allocate vmemmap pages. However, we can defer the
1570  * actual freeing to a workqueue to avoid having to use GFP_ATOMIC to allocate
1571  * the vmemmap pages.
1572  *
1573  * free_hpage_workfn() locklessly retrieves the linked list of pages to be
1574  * freed and frees them one-by-one. As the page->mapping pointer is going
1575  * to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node
1576  * structure of a lockless linked list of huge pages to be freed.
1577  */
1578 static LLIST_HEAD(hpage_freelist);
1579 
1580 static void free_hpage_workfn(struct work_struct *work)
1581 {
1582 	struct llist_node *node;
1583 
1584 	node = llist_del_all(&hpage_freelist);
1585 
1586 	while (node) {
1587 		struct folio *folio;
1588 		struct hstate *h;
1589 
1590 		folio = container_of((struct address_space **)node,
1591 				     struct folio, mapping);
1592 		node = node->next;
1593 		folio->mapping = NULL;
1594 		/*
1595 		 * The VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio) in
1596 		 * folio_hstate() is going to trigger because a previous call to
1597 		 * remove_hugetlb_folio() will clear the hugetlb bit, so do
1598 		 * not use folio_hstate() directly.
1599 		 */
1600 		h = size_to_hstate(folio_size(folio));
1601 
1602 		__update_and_free_hugetlb_folio(h, folio);
1603 
1604 		cond_resched();
1605 	}
1606 }
1607 static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
1608 
1609 static inline void flush_free_hpage_work(struct hstate *h)
1610 {
1611 	if (hugetlb_vmemmap_optimizable(h))
1612 		flush_work(&free_hpage_work);
1613 }
1614 
1615 static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
1616 				 bool atomic)
1617 {
1618 	if (!folio_test_hugetlb_vmemmap_optimized(folio) || !atomic) {
1619 		__update_and_free_hugetlb_folio(h, folio);
1620 		return;
1621 	}
1622 
1623 	/*
1624 	 * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
1625 	 *
1626 	 * Only call schedule_work() if hpage_freelist was previously
1627 	 * empty. Otherwise, schedule_work() has already been called but the
1628 	 * workfn hasn't retrieved the list yet.
1629 	 */
1630 	if (llist_add((struct llist_node *)&folio->mapping, &hpage_freelist))
1631 		schedule_work(&free_hpage_work);
1632 }
1633 
1634 static void bulk_vmemmap_restore_error(struct hstate *h,
1635 					struct list_head *folio_list,
1636 					struct list_head *non_hvo_folios)
1637 {
1638 	struct folio *folio, *t_folio;
1639 
1640 	if (!list_empty(non_hvo_folios)) {
1641 		/*
1642 		 * Free any restored hugetlb pages so that restore of the
1643 		 * entire list can be retried.
1644 		 * The idea is that in the common case of ENOMEM errors freeing
1645 		 * hugetlb pages with vmemmap we will free up memory so that we
1646 		 * can allocate vmemmap for more hugetlb pages.
1647 		 */
1648 		list_for_each_entry_safe(folio, t_folio, non_hvo_folios, lru) {
1649 			list_del(&folio->lru);
1650 			spin_lock_irq(&hugetlb_lock);
1651 			__folio_clear_hugetlb(folio);
1652 			spin_unlock_irq(&hugetlb_lock);
1653 			update_and_free_hugetlb_folio(h, folio, false);
1654 			cond_resched();
1655 		}
1656 	} else {
1657 		/*
1658 		 * In the case where there are no folios which can be
1659 		 * immediately freed, we loop through the list trying to restore
1660 		 * vmemmap individually in the hope that someone elsewhere may
1661 		 * have done something to cause success (such as freeing some
1662 		 * memory).  If unable to restore a hugetlb page, the hugetlb
1663 		 * page is made a surplus page and removed from the list.
1664 		 * If we are able to restore vmemmap and free one hugetlb page, we
1665 		 * quit processing the list to retry the bulk operation.
1666 		 */
1667 		list_for_each_entry_safe(folio, t_folio, folio_list, lru)
1668 			if (hugetlb_vmemmap_restore_folio(h, folio)) {
1669 				list_del(&folio->lru);
1670 				spin_lock_irq(&hugetlb_lock);
1671 				add_hugetlb_folio(h, folio, true);
1672 				spin_unlock_irq(&hugetlb_lock);
1673 			} else {
1674 				list_del(&folio->lru);
1675 				spin_lock_irq(&hugetlb_lock);
1676 				__folio_clear_hugetlb(folio);
1677 				spin_unlock_irq(&hugetlb_lock);
1678 				update_and_free_hugetlb_folio(h, folio, false);
1679 				cond_resched();
1680 				break;
1681 			}
1682 	}
1683 }
1684 
1685 static void update_and_free_pages_bulk(struct hstate *h,
1686 						struct list_head *folio_list)
1687 {
1688 	long ret;
1689 	struct folio *folio, *t_folio;
1690 	LIST_HEAD(non_hvo_folios);
1691 
1692 	/*
1693 	 * First allocate required vmemmap (if necessary) for all folios.
1694 	 * Carefully handle errors and free up any available hugetlb pages
1695 	 * in an effort to make forward progress.
1696 	 */
1697 retry:
1698 	ret = hugetlb_vmemmap_restore_folios(h, folio_list, &non_hvo_folios);
1699 	if (ret < 0) {
1700 		bulk_vmemmap_restore_error(h, folio_list, &non_hvo_folios);
1701 		goto retry;
1702 	}
1703 
1704 	/*
1705 	 * At this point, the list should be empty, ret should be >= 0 and there
1706 	 * should only be pages on the non_hvo_folios list.
1707 	 * Do note that the non_hvo_folios list could be empty.
1708 	 * Without HVO enabled, ret will be 0 and there is no need to call
1709 	 * __folio_clear_hugetlb as this was done previously.
1710 	 */
1711 	VM_WARN_ON(!list_empty(folio_list));
1712 	VM_WARN_ON(ret < 0);
1713 	if (!list_empty(&non_hvo_folios) && ret) {
1714 		spin_lock_irq(&hugetlb_lock);
1715 		list_for_each_entry(folio, &non_hvo_folios, lru)
1716 			__folio_clear_hugetlb(folio);
1717 		spin_unlock_irq(&hugetlb_lock);
1718 	}
1719 
1720 	list_for_each_entry_safe(folio, t_folio, &non_hvo_folios, lru) {
1721 		update_and_free_hugetlb_folio(h, folio, false);
1722 		cond_resched();
1723 	}
1724 }
1725 
1726 struct hstate *size_to_hstate(unsigned long size)
1727 {
1728 	struct hstate *h;
1729 
1730 	for_each_hstate(h) {
1731 		if (huge_page_size(h) == size)
1732 			return h;
1733 	}
1734 	return NULL;
1735 }
1736 
1737 void free_huge_folio(struct folio *folio)
1738 {
1739 	/*
1740 	 * Can't pass hstate in here because it is called from the
1741 	 * generic mm code.
1742 	 */
1743 	struct hstate *h = folio_hstate(folio);
1744 	int nid = folio_nid(folio);
1745 	struct hugepage_subpool *spool = hugetlb_folio_subpool(folio);
1746 	bool restore_reserve;
1747 	unsigned long flags;
1748 
1749 	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
1750 	VM_BUG_ON_FOLIO(folio_mapcount(folio), folio);
1751 
1752 	hugetlb_set_folio_subpool(folio, NULL);
1753 	if (folio_test_anon(folio))
1754 		__ClearPageAnonExclusive(&folio->page);
1755 	folio->mapping = NULL;
1756 	restore_reserve = folio_test_hugetlb_restore_reserve(folio);
1757 	folio_clear_hugetlb_restore_reserve(folio);
1758 
1759 	/*
1760 	 * If HPageRestoreReserve was set on the page, the page allocation
1761 	 * consumed a reservation.  If the page was associated with a subpool,
1762 	 * there would have been a page reserved in the subpool before allocation
1763 	 * via hugepage_subpool_get_pages().  Since we are 'restoring' the
1764 	 * reservation, do not call hugepage_subpool_put_pages() as this will
1765 	 * remove the reserved page from the subpool.
1766 	 */
1767 	if (!restore_reserve) {
1768 		/*
1769 		 * A return code of zero implies that the subpool will be
1770 		 * under its minimum size if the reservation is not restored
1771 		 * after the page is freed.  Therefore, force the
1772 		 * restore_reserve operation.
1773 		 */
1774 		if (hugepage_subpool_put_pages(spool, 1) == 0)
1775 			restore_reserve = true;
1776 	}
1777 
1778 	spin_lock_irqsave(&hugetlb_lock, flags);
1779 	folio_clear_hugetlb_migratable(folio);
1780 	hugetlb_cgroup_uncharge_folio(hstate_index(h),
1781 				     pages_per_huge_page(h), folio);
1782 	hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
1783 					  pages_per_huge_page(h), folio);
1784 	lruvec_stat_mod_folio(folio, NR_HUGETLB, -pages_per_huge_page(h));
1785 	mem_cgroup_uncharge(folio);
1786 	if (restore_reserve)
1787 		h->resv_huge_pages++;
1788 
1789 	if (folio_test_hugetlb_temporary(folio)) {
1790 		remove_hugetlb_folio(h, folio, false);
1791 		spin_unlock_irqrestore(&hugetlb_lock, flags);
1792 		update_and_free_hugetlb_folio(h, folio, true);
1793 	} else if (h->surplus_huge_pages_node[nid]) {
1794 		/* remove the page from active list */
1795 		remove_hugetlb_folio(h, folio, true);
1796 		spin_unlock_irqrestore(&hugetlb_lock, flags);
1797 		update_and_free_hugetlb_folio(h, folio, true);
1798 	} else {
1799 		arch_clear_hugetlb_flags(folio);
1800 		enqueue_hugetlb_folio(h, folio);
1801 		spin_unlock_irqrestore(&hugetlb_lock, flags);
1802 	}
1803 }
1804 
1805 /*
1806  * Must be called with the hugetlb lock held
1807  */
1808 static void account_new_hugetlb_folio(struct hstate *h, struct folio *folio)
1809 {
1810 	lockdep_assert_held(&hugetlb_lock);
1811 	h->nr_huge_pages++;
1812 	h->nr_huge_pages_node[folio_nid(folio)]++;
1813 }
1814 
1815 void init_new_hugetlb_folio(struct folio *folio)
1816 {
1817 	__folio_set_hugetlb(folio);
1818 	INIT_LIST_HEAD(&folio->lru);
1819 	hugetlb_set_folio_subpool(folio, NULL);
1820 	set_hugetlb_cgroup(folio, NULL);
1821 	set_hugetlb_cgroup_rsvd(folio, NULL);
1822 }
1823 
1824 /*
1825  * Find and lock address space (mapping) in write mode.
1826  *
1827  * Upon entry, the folio is locked, which means that folio_mapping() is
1828  * stable.  Due to locking order, we can only trylock_write.  If we
1829  * cannot get the lock, simply return NULL to the caller.
1830  */
1831 struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio)
1832 {
1833 	struct address_space *mapping = folio_mapping(folio);
1834 
1835 	if (!mapping)
1836 		return mapping;
1837 
1838 	if (i_mmap_trylock_write(mapping))
1839 		return mapping;
1840 
1841 	return NULL;
1842 }
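
/*
 * Illustrative sketch (not part of the kernel sources): a hypothetical
 * caller is expected to hold the folio lock, handle the NULL case
 * (trylock failed or no mapping), and drop i_mmap_rwsem when done:
 *
 *	struct address_space *mapping;
 *
 *	folio_lock(folio);
 *	mapping = hugetlb_folio_mapping_lock_write(folio);
 *	if (mapping) {
 *		... operate on the rmap with i_mmap_rwsem held for write ...
 *		i_mmap_unlock_write(mapping);
 *	}
 *	folio_unlock(folio);
 */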
1843 
1844 static struct folio *alloc_buddy_frozen_folio(int order, gfp_t gfp_mask,
1845 		int nid, nodemask_t *nmask, nodemask_t *node_alloc_noretry)
1846 {
1847 	struct folio *folio;
1848 	bool alloc_try_hard = true;
1849 
1850 	/*
1851 	 * By default we always try hard to allocate the folio with the
1852 	 * __GFP_RETRY_MAYFAIL flag.  However, if we are allocating folios in
1853 	 * a loop (to adjust global huge page counts) and previous allocation
1854 	 * failed, do not continue to try hard on the same node.  Use the
1855 	 * node_alloc_noretry bitmap to manage this state information.
1856 	 */
1857 	if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
1858 		alloc_try_hard = false;
1859 	if (alloc_try_hard)
1860 		gfp_mask |= __GFP_RETRY_MAYFAIL;
1861 
1862 	folio = (struct folio *)__alloc_frozen_pages(gfp_mask, order, nid, nmask);
1863 
1864 	/*
1865 	 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a
1866 	 * folio, this indicates an overall state change.  Clear the bit so
1867 	 * that we resume normal 'try hard' allocations.
1868 	 */
1869 	if (node_alloc_noretry && folio && !alloc_try_hard)
1870 		node_clear(nid, *node_alloc_noretry);
1871 
1872 	/*
1873 	 * If we tried hard to get a folio but failed, set bit so that
1874 	 * subsequent attempts will not try as hard until there is an
1875 	 * overall state change.
1876 	 */
1877 	if (node_alloc_noretry && !folio && alloc_try_hard)
1878 		node_set(nid, *node_alloc_noretry);
1879 
1880 	if (!folio) {
1881 		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1882 		return NULL;
1883 	}
1884 
1885 	__count_vm_event(HTLB_BUDDY_PGALLOC);
1886 	return folio;
1887 }
1888 
1889 static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h,
1890 		gfp_t gfp_mask, int nid, nodemask_t *nmask,
1891 		nodemask_t *node_alloc_noretry)
1892 {
1893 	struct folio *folio;
1894 	int order = huge_page_order(h);
1895 
1896 	if (nid == NUMA_NO_NODE)
1897 		nid = numa_mem_id();
1898 
1899 	if (order_is_gigantic(order))
1900 		folio = alloc_gigantic_frozen_folio(order, gfp_mask, nid, nmask);
1901 	else
1902 		folio = alloc_buddy_frozen_folio(order, gfp_mask, nid, nmask,
1903 						 node_alloc_noretry);
1904 	if (folio)
1905 		init_new_hugetlb_folio(folio);
1906 	return folio;
1907 }
1908 
1909 /*
1910  * Common helper to allocate a fresh hugetlb folio. All specific allocators
1911  * should use this function to get a new hugetlb folio.
1912  *
1913  * Note that the returned folio is 'frozen': the ref count of the head page
1914  * and all tail pages is zero, and the accounting must be done by the caller.
1915  */
1916 static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h,
1917 		gfp_t gfp_mask, int nid, nodemask_t *nmask)
1918 {
1919 	struct folio *folio;
1920 
1921 	folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
1922 	if (folio)
1923 		hugetlb_vmemmap_optimize_folio(h, folio);
1924 	return folio;
1925 }
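
/*
 * Illustrative sketch (mirrors the callers below, e.g.
 * alloc_migrate_hugetlb_folio): because the returned folio is frozen,
 * a caller accounts it first and only then hands out the initial reference:
 *
 *	folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask);
 *	if (folio) {
 *		spin_lock_irq(&hugetlb_lock);
 *		account_new_hugetlb_folio(h, folio);
 *		spin_unlock_irq(&hugetlb_lock);
 *		folio_ref_unfreeze(folio, 1);
 *	}
 */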
1926 
1927 void prep_and_add_allocated_folios(struct hstate *h,
1928 				   struct list_head *folio_list)
1929 {
1930 	unsigned long flags;
1931 	struct folio *folio, *tmp_f;
1932 
1933 	/* Send list for bulk vmemmap optimization processing */
1934 	hugetlb_vmemmap_optimize_folios(h, folio_list);
1935 
1936 	/* Add all new pool pages to free lists in one lock cycle */
1937 	spin_lock_irqsave(&hugetlb_lock, flags);
1938 	list_for_each_entry_safe(folio, tmp_f, folio_list, lru) {
1939 		account_new_hugetlb_folio(h, folio);
1940 		enqueue_hugetlb_folio(h, folio);
1941 	}
1942 	spin_unlock_irqrestore(&hugetlb_lock, flags);
1943 }
1944 
1945 /*
1946  * Allocates a fresh hugetlb page in a node interleaved manner.  The page
1947  * will later be added to the appropriate hugetlb pool.
1948  */
1949 static struct folio *alloc_pool_huge_folio(struct hstate *h,
1950 					nodemask_t *nodes_allowed,
1951 					nodemask_t *node_alloc_noretry,
1952 					int *next_node)
1953 {
1954 	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
1955 	int nr_nodes, node;
1956 
1957 	for_each_node_mask_to_alloc(next_node, nr_nodes, node, nodes_allowed) {
1958 		struct folio *folio;
1959 
1960 		folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, node,
1961 					nodes_allowed, node_alloc_noretry);
1962 		if (folio)
1963 			return folio;
1964 	}
1965 
1966 	return NULL;
1967 }
1968 
1969 /*
1970  * Remove a huge page from the pool, starting from the next node to free.
1971  * Attempt to keep persistent huge pages more or less balanced over the
1972  * allowed nodes.  This routine only 'removes' the hugetlb page.  The caller
1973  * must make an additional call to free the page to the low level allocators.
1974  * Called with hugetlb_lock locked.
1975  */
1976 static struct folio *remove_pool_hugetlb_folio(struct hstate *h,
1977 		nodemask_t *nodes_allowed, bool acct_surplus)
1978 {
1979 	int nr_nodes, node;
1980 	struct folio *folio = NULL;
1981 
1982 	lockdep_assert_held(&hugetlb_lock);
1983 	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1984 		/*
1985 		 * If we're returning unused surplus pages, only examine
1986 		 * nodes with surplus pages.
1987 		 */
1988 		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1989 		    !list_empty(&h->hugepage_freelists[node])) {
1990 			folio = list_entry(h->hugepage_freelists[node].next,
1991 					  struct folio, lru);
1992 			remove_hugetlb_folio(h, folio, acct_surplus);
1993 			break;
1994 		}
1995 	}
1996 
1997 	return folio;
1998 }
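
/*
 * Illustrative sketch (mirrors return_unused_surplus_pages below): the
 * folio is only unlinked here; the caller drops hugetlb_lock and then
 * returns the memory to the low level allocators:
 *
 *	spin_lock_irq(&hugetlb_lock);
 *	folio = remove_pool_hugetlb_folio(h, &node_states[N_MEMORY], false);
 *	spin_unlock_irq(&hugetlb_lock);
 *	if (folio)
 *		update_and_free_hugetlb_folio(h, folio, false);
 */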
1999 
2000 /*
2001  * Dissolve a given free hugetlb folio into free buddy pages. This function
2002  * does nothing for in-use hugetlb folios and non-hugetlb folios.
2003  * This function returns values like below:
2004  *
2005  *  -ENOMEM: failed to allocate vmemmap pages to free the freed hugepages
2006  *           when the system is under memory pressure and the feature of
2007  *           freeing unused vmemmap pages associated with each hugetlb page
2008  *           is enabled.
2009  *  -EBUSY:  failed to dissolve free hugepages or the hugepage is in-use
2010  *           (allocated or reserved.)
2011  *       0:  successfully dissolved free hugepages or the page is not a
2012  *           hugepage (considered as already dissolved)
2013  */
2014 int dissolve_free_hugetlb_folio(struct folio *folio)
2015 {
2016 	int rc = -EBUSY;
2017 
2018 retry:
2019 	/* Not to disrupt normal path by vainly holding hugetlb_lock */
2020 	if (!folio_test_hugetlb(folio))
2021 		return 0;
2022 
2023 	spin_lock_irq(&hugetlb_lock);
2024 	if (!folio_test_hugetlb(folio)) {
2025 		rc = 0;
2026 		goto out;
2027 	}
2028 
2029 	if (!folio_ref_count(folio)) {
2030 		struct hstate *h = folio_hstate(folio);
2031 		bool adjust_surplus = false;
2032 
2033 		if (!available_huge_pages(h))
2034 			goto out;
2035 
2036 		/*
2037 		 * We should make sure that the page is already on the free list
2038 		 * when it is dissolved.
2039 		 */
2040 		if (unlikely(!folio_test_hugetlb_freed(folio))) {
2041 			spin_unlock_irq(&hugetlb_lock);
2042 			cond_resched();
2043 
2044 			/*
2045 			 * Theoretically, we should return -EBUSY when we
2046 			 * encounter this race. In fact, we have a chance
2047 			 * to successfully dissolve the page if we retry,
2048 			 * because the race window is quite small.  Seizing
2049 			 * this opportunity increases the success rate of
2050 			 * dissolving the page.
2051 			 */
2052 			goto retry;
2053 		}
2054 
2055 		if (h->surplus_huge_pages_node[folio_nid(folio)])
2056 			adjust_surplus = true;
2057 		remove_hugetlb_folio(h, folio, adjust_surplus);
2058 		h->max_huge_pages--;
2059 		spin_unlock_irq(&hugetlb_lock);
2060 
2061 		/*
2062 		 * Normally update_and_free_hugetlb_folio will allocate the required
2063 		 * vmemmap before freeing the page.  update_and_free_hugetlb_folio
2064 		 * will fail to free the page if it cannot allocate the required
2065 		 * vmemmap.  We need to adjust max_huge_pages if the page is not freed.
2066 		 * Attempt to allocate vmemmap here so that we can take
2067 		 * appropriate action on failure.
2068 		 *
2069 		 * The folio_test_hugetlb check here is because
2070 		 * remove_hugetlb_folio will clear the hugetlb folio flag for
2071 		 * non-vmemmap-optimized hugetlb folios.
2072 		 */
2073 		if (folio_test_hugetlb(folio)) {
2074 			rc = hugetlb_vmemmap_restore_folio(h, folio);
2075 			if (rc) {
2076 				spin_lock_irq(&hugetlb_lock);
2077 				add_hugetlb_folio(h, folio, adjust_surplus);
2078 				h->max_huge_pages++;
2079 				goto out;
2080 			}
2081 		} else {
2082 			rc = 0;
2083 		}
2084 
2085 		update_and_free_hugetlb_folio(h, folio, false);
2086 		return rc;
2087 	}
2088 out:
2089 	spin_unlock_irq(&hugetlb_lock);
2090 	return rc;
2091 }
2092 
2093 /*
2094  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
2095  * make specified memory blocks removable from the system.
2096  * Note that this will dissolve a free gigantic hugepage completely, if any
2097  * part of it lies within the given range.
2098  * Also note that if dissolve_free_hugetlb_folio() returns with an error, all
2099  * free hugetlb folios that were dissolved before that error are lost.
2100  */
2101 int dissolve_free_hugetlb_folios(unsigned long start_pfn, unsigned long end_pfn)
2102 {
2103 	unsigned long pfn;
2104 	struct folio *folio;
2105 	int rc = 0;
2106 	unsigned int order;
2107 	struct hstate *h;
2108 
2109 	if (!hugepages_supported())
2110 		return rc;
2111 
2112 	order = huge_page_order(&default_hstate);
2113 	for_each_hstate(h)
2114 		order = min(order, huge_page_order(h));
2115 
2116 	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
2117 		folio = pfn_folio(pfn);
2118 		rc = dissolve_free_hugetlb_folio(folio);
2119 		if (rc)
2120 			break;
2121 	}
2122 
2123 	return rc;
2124 }
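
/*
 * Worked example (assuming x86-64 with 4KB base pages): with 2MB
 * (order 9) and 1GB (order 18) hstates registered, the minimum order is
 * 9, so the loop above scans the pfn range in 512-page steps.  A pfn that
 * is not a hugetlb folio is treated as already dissolved (return 0),
 * while an in-use hugetlb folio stops the loop with -EBUSY.
 */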
2125 
2126 /*
2127  * Allocates a fresh surplus page from the page allocator.
2128  */
2129 static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
2130 				gfp_t gfp_mask,	int nid, nodemask_t *nmask)
2131 {
2132 	struct folio *folio = NULL;
2133 
2134 	if (hstate_is_gigantic_no_runtime(h))
2135 		return NULL;
2136 
2137 	spin_lock_irq(&hugetlb_lock);
2138 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
2139 		goto out_unlock;
2140 	spin_unlock_irq(&hugetlb_lock);
2141 
2142 	folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask);
2143 	if (!folio)
2144 		return NULL;
2145 
2146 	spin_lock_irq(&hugetlb_lock);
2147 	/*
2148 	 * nr_huge_pages needs to be adjusted within the same lock cycle
2149 	 * as surplus_pages, otherwise it might confuse
2150 	 * persistent_huge_pages() momentarily.
2151 	 */
2152 	account_new_hugetlb_folio(h, folio);
2153 
2154 	/*
2155 	 * We could have raced with the pool size change.
2156 	 * Double check that and simply deallocate the new page
2157 	 * if we would end up overcommitting the surpluses.  Abuse the
2158 	 * temporary page flag to work around the nasty free_huge_folio
2159 	 * code flow.
2160 	 */
2161 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
2162 		folio_set_hugetlb_temporary(folio);
2163 		spin_unlock_irq(&hugetlb_lock);
2164 		free_huge_folio(folio);
2165 		return NULL;
2166 	}
2167 
2168 	h->surplus_huge_pages++;
2169 	h->surplus_huge_pages_node[folio_nid(folio)]++;
2170 
2171 out_unlock:
2172 	spin_unlock_irq(&hugetlb_lock);
2173 
2174 	return folio;
2175 }
2176 
2177 static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask,
2178 				     int nid, nodemask_t *nmask)
2179 {
2180 	struct folio *folio;
2181 
2182 	if (hstate_is_gigantic(h))
2183 		return NULL;
2184 
2185 	folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask);
2186 	if (!folio)
2187 		return NULL;
2188 
2189 	spin_lock_irq(&hugetlb_lock);
2190 	account_new_hugetlb_folio(h, folio);
2191 	spin_unlock_irq(&hugetlb_lock);
2192 
2193 	/* fresh huge pages are frozen */
2194 	folio_ref_unfreeze(folio, 1);
2195 	/*
2196 	 * We do not account these pages as surplus because they are only
2197 	 * temporary and will be released properly on the last reference
2198 	 * temporary and will be released properly on the last reference.
2199 	folio_set_hugetlb_temporary(folio);
2200 
2201 	return folio;
2202 }
2203 
2204 /*
2205  * Use the VMA's mpolicy to allocate a huge page from the buddy.
2206  */
2207 static
2208 struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
2209 		struct vm_area_struct *vma, unsigned long addr)
2210 {
2211 	struct folio *folio = NULL;
2212 	struct mempolicy *mpol;
2213 	gfp_t gfp_mask = htlb_alloc_mask(h);
2214 	int nid;
2215 	nodemask_t *nodemask;
2216 
2217 	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
2218 	if (mpol_is_preferred_many(mpol)) {
2219 		gfp_t gfp = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2220 
2221 		folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask);
2222 
2223 		/* Fall back to all nodes if folio == NULL */
2224 		nodemask = NULL;
2225 	}
2226 
2227 	if (!folio)
2228 		folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask);
2229 	mpol_cond_put(mpol);
2230 	return folio;
2231 }
2232 
2233 struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
2234 		nodemask_t *nmask, gfp_t gfp_mask)
2235 {
2236 	struct folio *folio;
2237 
2238 	spin_lock_irq(&hugetlb_lock);
2239 	if (!h->resv_huge_pages) {
2240 		spin_unlock_irq(&hugetlb_lock);
2241 		return NULL;
2242 	}
2243 
2244 	folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, preferred_nid,
2245 					       nmask);
2246 	if (folio)
2247 		h->resv_huge_pages--;
2248 
2249 	spin_unlock_irq(&hugetlb_lock);
2250 	return folio;
2251 }
2252 
2253 /* folio migration callback function */
2254 struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
2255 		nodemask_t *nmask, gfp_t gfp_mask, bool allow_alloc_fallback)
2256 {
2257 	spin_lock_irq(&hugetlb_lock);
2258 	if (available_huge_pages(h)) {
2259 		struct folio *folio;
2260 
2261 		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
2262 						preferred_nid, nmask);
2263 		if (folio) {
2264 			spin_unlock_irq(&hugetlb_lock);
2265 			return folio;
2266 		}
2267 	}
2268 	spin_unlock_irq(&hugetlb_lock);
2269 
2270 	/* We cannot fallback to other nodes, as we could break the per-node pool. */
2271 	if (!allow_alloc_fallback)
2272 		gfp_mask |= __GFP_THISNODE;
2273 
2274 	return alloc_migrate_hugetlb_folio(h, gfp_mask, preferred_nid, nmask);
2275 }
2276 
2277 static nodemask_t *policy_mbind_nodemask(gfp_t gfp)
2278 {
2279 #ifdef CONFIG_NUMA
2280 	struct mempolicy *mpol = get_task_policy(current);
2281 
2282 	/*
2283 	 * Only enforce the MPOL_BIND policy which overlaps with the cpuset
2284 	 * policy (from policy_nodemask), specifically for the hugetlb case.
2285 	 */
2286 	if (mpol->mode == MPOL_BIND &&
2287 		(apply_policy_zone(mpol, gfp_zone(gfp)) &&
2288 		 cpuset_nodemask_valid_mems_allowed(&mpol->nodes)))
2289 		return &mpol->nodes;
2290 #endif
2291 	return NULL;
2292 }
2293 
2294 /*
2295  * Increase the hugetlb pool such that it can accommodate a reservation
2296  * of size 'delta'.
2297  */
2298 static int gather_surplus_pages(struct hstate *h, long delta)
2299 	__must_hold(&hugetlb_lock)
2300 {
2301 	LIST_HEAD(surplus_list);
2302 	struct folio *folio, *tmp;
2303 	int ret;
2304 	long i;
2305 	long needed, allocated;
2306 	bool alloc_ok = true;
2307 	nodemask_t *mbind_nodemask, alloc_nodemask;
2308 
2309 	mbind_nodemask = policy_mbind_nodemask(htlb_alloc_mask(h));
2310 	if (mbind_nodemask)
2311 		nodes_and(alloc_nodemask, *mbind_nodemask, cpuset_current_mems_allowed);
2312 	else
2313 		alloc_nodemask = cpuset_current_mems_allowed;
2314 
2315 	lockdep_assert_held(&hugetlb_lock);
2316 	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
2317 	if (needed <= 0) {
2318 		h->resv_huge_pages += delta;
2319 		return 0;
2320 	}
2321 
2322 	allocated = 0;
2323 
2324 	ret = -ENOMEM;
2325 retry:
2326 	spin_unlock_irq(&hugetlb_lock);
2327 	for (i = 0; i < needed; i++) {
2328 		folio = NULL;
2329 
2330 		/*
2331 		 * It is okay to use NUMA_NO_NODE because we use numa_mem_id()
2332 		 * down the road to pick the current node if that is the case.
2333 		 */
2334 		folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h),
2335 						    NUMA_NO_NODE, &alloc_nodemask);
2336 		if (!folio) {
2337 			alloc_ok = false;
2338 			break;
2339 		}
2340 		list_add(&folio->lru, &surplus_list);
2341 		cond_resched();
2342 	}
2343 	allocated += i;
2344 
2345 	/*
2346 	 * After retaking hugetlb_lock, we need to recalculate 'needed'
2347 	 * because either resv_huge_pages or free_huge_pages may have changed.
2348 	 */
2349 	spin_lock_irq(&hugetlb_lock);
2350 	needed = (h->resv_huge_pages + delta) -
2351 			(h->free_huge_pages + allocated);
2352 	if (needed > 0) {
2353 		if (alloc_ok)
2354 			goto retry;
2355 		/*
2356 		 * We were not able to allocate enough pages to
2357 		 * satisfy the entire reservation, so we free what
2358 		 * we've allocated so far.
2359 		 */
2360 		goto free;
2361 	}
2362 	/*
2363 	 * The surplus_list now contains _at_least_ the number of extra pages
2364 	 * needed to accommodate the reservation.  Add the appropriate number
2365 	 * of pages to the hugetlb pool and free the extras back to the buddy
2366 	 * allocator.  Commit the entire reservation here to prevent another
2367 	 * process from stealing the pages as they are added to the pool but
2368 	 * before they are reserved.
2369 	 */
2370 	needed += allocated;
2371 	h->resv_huge_pages += delta;
2372 	ret = 0;
2373 
2374 	/* Free the needed pages to the hugetlb pool */
2375 	list_for_each_entry_safe(folio, tmp, &surplus_list, lru) {
2376 		if ((--needed) < 0)
2377 			break;
2378 		/* Add the page to the hugetlb allocator */
2379 		enqueue_hugetlb_folio(h, folio);
2380 	}
2381 free:
2382 	spin_unlock_irq(&hugetlb_lock);
2383 
2384 	/*
2385 	 * Free unnecessary surplus pages to the buddy allocator.
2386 	 * Pages have no ref count, call free_huge_folio directly.
2387 	 */
2388 	list_for_each_entry_safe(folio, tmp, &surplus_list, lru)
2389 		free_huge_folio(folio);
2390 	spin_lock_irq(&hugetlb_lock);
2391 
2392 	return ret;
2393 }
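
/*
 * Worked example (hypothetical numbers): with resv_huge_pages == 2,
 * free_huge_pages == 1 and delta == 3, the first pass computes
 * needed = (2 + 3) - 1 = 4 and allocates four surplus folios.  If the
 * counters are unchanged once hugetlb_lock is retaken, the recalculation
 * yields needed = 5 - (1 + 4) = 0, the reservation is committed, and all
 * four folios are enqueued into the pool.
 */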
2394 
2395 /*
2396  * This routine has two main purposes:
2397  * 1) Decrement the reservation count (resv_huge_pages) by the value passed
2398  *    in unused_resv_pages.  This corresponds to the prior adjustments made
2399  *    to the associated reservation map.
2400  * 2) Free any unused surplus pages that may have been allocated to satisfy
2401  *    the reservation.  As many as unused_resv_pages may be freed.
2402  */
2403 static void return_unused_surplus_pages(struct hstate *h,
2404 					unsigned long unused_resv_pages)
2405 {
2406 	unsigned long nr_pages;
2407 	LIST_HEAD(page_list);
2408 
2409 	lockdep_assert_held(&hugetlb_lock);
2410 	/* Uncommit the reservation */
2411 	h->resv_huge_pages -= unused_resv_pages;
2412 
2413 	if (hstate_is_gigantic_no_runtime(h))
2414 		goto out;
2415 
2416 	/*
2417 	 * Part (or even all) of the reservation could have been backed
2418 	 * by pre-allocated pages. Only free surplus pages.
2419 	 */
2420 	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
2421 
2422 	/*
2423 	 * We want to release as many surplus pages as possible, spread
2424 	 * evenly across all nodes with memory. Iterate across these nodes
2425 	 * until we can no longer free unreserved surplus pages. This occurs
2426 	 * when the nodes with surplus pages have no free pages.
2427 	 * remove_pool_hugetlb_folio() will balance the freed pages across the
2428 	 * on-line nodes with memory and will handle the hstate accounting.
2429 	 */
2430 	while (nr_pages--) {
2431 		struct folio *folio;
2432 
2433 		folio = remove_pool_hugetlb_folio(h, &node_states[N_MEMORY], 1);
2434 		if (!folio)
2435 			goto out;
2436 
2437 		list_add(&folio->lru, &page_list);
2438 	}
2439 
2440 out:
2441 	spin_unlock_irq(&hugetlb_lock);
2442 	update_and_free_pages_bulk(h, &page_list);
2443 	spin_lock_irq(&hugetlb_lock);
2444 }
2445 
2446 
2447 /*
2448  * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
2449  * are used by the huge page allocation routines to manage reservations.
2450  *
2451  * vma_needs_reservation is called to determine if the huge page at addr
2452  * within the vma has an associated reservation.  If a reservation is
2453  * needed, the value 1 is returned.  The caller is then responsible for
2454  * managing the global reservation and subpool usage counts.  After
2455  * the huge page has been allocated, vma_commit_reservation is called
2456  * to add the page to the reservation map.  If the page allocation fails,
2457  * the reservation must be ended instead of committed.  vma_end_reservation
2458  * is called in such cases.
2459  *
2460  * In the normal case, vma_commit_reservation returns the same value
2461  * as the preceding vma_needs_reservation call.  The only time this
2462  * is not the case is if a reserve map was changed between calls.  It
2463  * is the responsibility of the caller to notice the difference and
2464  * take appropriate action.
2465  *
2466  * vma_add_reservation is used in error paths where a reservation must
2467  * be restored when a newly allocated huge page must be freed.  It is
2468  * to be called after calling vma_needs_reservation to determine if a
2469  * reservation exists.
2470  *
2471  * vma_del_reservation is used in error paths where an entry in the reserve
2472  * map was created during huge page allocation and must be removed.  It is to
2473  * be called after calling vma_needs_reservation to determine if a reservation
2474  * exists.
2475  */
2476 enum vma_resv_mode {
2477 	VMA_NEEDS_RESV,
2478 	VMA_COMMIT_RESV,
2479 	VMA_END_RESV,
2480 	VMA_ADD_RESV,
2481 	VMA_DEL_RESV,
2482 };
2483 static long __vma_reservation_common(struct hstate *h,
2484 				struct vm_area_struct *vma, unsigned long addr,
2485 				enum vma_resv_mode mode)
2486 {
2487 	struct resv_map *resv;
2488 	pgoff_t idx;
2489 	long ret;
2490 	long dummy_out_regions_needed;
2491 
2492 	resv = vma_resv_map(vma);
2493 	if (!resv)
2494 		return 1;
2495 
2496 	idx = vma_hugecache_offset(h, vma, addr);
2497 	switch (mode) {
2498 	case VMA_NEEDS_RESV:
2499 		ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed);
2500 		/* We assume that vma_reservation_* routines always operate on
2501 		 * 1 page, and that adding a 1 page entry to the resv map can
2502 		 * only ever require 1 region.
2503 		 */
2504 		VM_BUG_ON(dummy_out_regions_needed != 1);
2505 		break;
2506 	case VMA_COMMIT_RESV:
2507 		ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2508 		/* region_add calls of range 1 should never fail. */
2509 		VM_BUG_ON(ret < 0);
2510 		break;
2511 	case VMA_END_RESV:
2512 		region_abort(resv, idx, idx + 1, 1);
2513 		ret = 0;
2514 		break;
2515 	case VMA_ADD_RESV:
2516 		if (vma->vm_flags & VM_MAYSHARE) {
2517 			ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2518 			/* region_add calls of range 1 should never fail. */
2519 			VM_BUG_ON(ret < 0);
2520 		} else {
2521 			region_abort(resv, idx, idx + 1, 1);
2522 			ret = region_del(resv, idx, idx + 1);
2523 		}
2524 		break;
2525 	case VMA_DEL_RESV:
2526 		if (vma->vm_flags & VM_MAYSHARE) {
2527 			region_abort(resv, idx, idx + 1, 1);
2528 			ret = region_del(resv, idx, idx + 1);
2529 		} else {
2530 			ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2531 			/* region_add calls of range 1 should never fail. */
2532 			VM_BUG_ON(ret < 0);
2533 		}
2534 		break;
2535 	default:
2536 		BUG();
2537 	}
2538 
2539 	if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV)
2540 		return ret;
2541 	/*
2542 	 * We know private mapping must have HPAGE_RESV_OWNER set.
2543 	 *
2544 	 * In most cases, reserves always exist for private mappings.
2545 	 * However, a file associated with the mapping could have been
2546 	 * hole punched or truncated after reserves were consumed, so a
2547 	 * subsequent fault on such a range will not use reserves.
2548 	 * Subtle - The reserve map for private mappings has the
2549 	 * opposite meaning than that of shared mappings.  If NO
2550 	 * entry is in the reserve map, it means a reservation exists.
2551 	 * If an entry exists in the reserve map, it means the
2552 	 * reservation has already been consumed.  As a result, the
2553 	 * return value of this routine is the opposite of the
2554 	 * value returned from reserve map manipulation routines above.
2555 	 */
2556 	if (ret > 0)
2557 		return 0;
2558 	if (ret == 0)
2559 		return 1;
2560 	return ret;
2561 }
2562 
2563 static long vma_needs_reservation(struct hstate *h,
2564 			struct vm_area_struct *vma, unsigned long addr)
2565 {
2566 	return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
2567 }
2568 
2569 static long vma_commit_reservation(struct hstate *h,
2570 			struct vm_area_struct *vma, unsigned long addr)
2571 {
2572 	return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
2573 }
2574 
2575 static void vma_end_reservation(struct hstate *h,
2576 			struct vm_area_struct *vma, unsigned long addr)
2577 {
2578 	(void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
2579 }
2580 
2581 static long vma_add_reservation(struct hstate *h,
2582 			struct vm_area_struct *vma, unsigned long addr)
2583 {
2584 	return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
2585 }
2586 
2587 static long vma_del_reservation(struct hstate *h,
2588 			struct vm_area_struct *vma, unsigned long addr)
2589 {
2590 	return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV);
2591 }
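
/*
 * Illustrative sketch (simplified from alloc_hugetlb_folio below) of the
 * needs/commit/end protocol described above:
 *
 *	ret = vma_needs_reservation(h, vma, addr);
 *	if (ret < 0)
 *		return ERR_PTR(-ENOMEM);
 *	folio = ...allocate the huge page...;
 *	if (!folio) {
 *		vma_end_reservation(h, vma, addr);
 *		return ERR_PTR(-ENOSPC);
 *	}
 *	vma_commit_reservation(h, vma, addr);
 */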
2592 
2593 /*
2594  * This routine is called to restore reservation information on error paths.
2595  * It should ONLY be called for folios allocated via alloc_hugetlb_folio(),
2596  * and the hugetlb mutex should remain held when calling this routine.
2597  *
2598  * It handles two specific cases:
2599  * 1) A reservation was in place and the folio consumed the reservation.
2600  *    hugetlb_restore_reserve is set in the folio.
2601  * 2) No reservation was in place for the page, so hugetlb_restore_reserve is
2602  *    not set.  However, alloc_hugetlb_folio always updates the reserve map.
2603  *
2604  * In case 1, free_huge_folio later in the error path will increment the
2605  * global reserve count.  But, free_huge_folio does not have enough context
2606  * to adjust the reservation map.  This case deals primarily with private
2607  * mappings.  Adjust the reserve map here to be consistent with global
2608  * reserve count adjustments to be made by free_huge_folio.  Make sure the
2609  * reserve map indicates there is a reservation present.
2610  *
2611  * In case 2, simply undo reserve map modifications done by alloc_hugetlb_folio.
2612  */
2613 void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
2614 			unsigned long address, struct folio *folio)
2615 {
2616 	long rc = vma_needs_reservation(h, vma, address);
2617 
2618 	if (folio_test_hugetlb_restore_reserve(folio)) {
2619 		if (unlikely(rc < 0))
2620 			/*
2621 			 * Rare out of memory condition in reserve map
2622 			 * manipulation.  Clear hugetlb_restore_reserve so
2623 			 * that global reserve count will not be incremented
2624 			 * by free_huge_folio.  This will make it appear
2625 			 * as though the reservation for this folio was
2626 			 * consumed.  This may prevent the task from
2627 			 * faulting in the folio at a later time.  This
2628 			 * is better than inconsistent global huge page
2629 			 * accounting of reserve counts.
2630 			 */
2631 			folio_clear_hugetlb_restore_reserve(folio);
2632 		else if (rc)
2633 			(void)vma_add_reservation(h, vma, address);
2634 		else
2635 			vma_end_reservation(h, vma, address);
2636 	} else {
2637 		if (!rc) {
2638 			/*
2639 			 * This indicates there is an entry in the reserve map
2640 			 * not added by alloc_hugetlb_folio.  We know it was added
2641 			 * before the alloc_hugetlb_folio call, otherwise
2642 			 * hugetlb_restore_reserve would be set on the folio.
2643 			 * Remove the entry so that a subsequent allocation
2644 			 * does not consume a reservation.
2645 			 */
2646 			rc = vma_del_reservation(h, vma, address);
2647 			if (rc < 0)
2648 				/*
2649 				 * VERY rare out of memory condition.  Since
2650 				 * we cannot delete the entry, set
2651 				 * hugetlb_restore_reserve so that the reserve
2652 				 * count will be incremented when the folio
2653 				 * is freed.  This reserve will be consumed
2654 				 * on a subsequent allocation.
2655 				 */
2656 				folio_set_hugetlb_restore_reserve(folio);
2657 		} else if (rc < 0) {
2658 			/*
2659 			 * Rare out of memory condition from
2660 			 * vma_needs_reservation call.  Memory allocation is
2661 			 * only attempted if a new entry is needed.  Therefore,
2662 			 * this implies there is not an entry in the
2663 			 * reserve map.
2664 			 *
2665 			 * For shared mappings, no entry in the map indicates
2666 			 * no reservation.  We are done.
2667 			 */
2668 			if (!(vma->vm_flags & VM_MAYSHARE))
2669 				/*
2670 				 * For private mappings, no entry indicates
2671 				 * a reservation is present.  Since we cannot
2672 				 * add an entry, set hugetlb_restore_reserve
2673 				 * on the folio so reserve count will be
2674 				 * incremented when freed.  This reserve will
2675 				 * be consumed on a subsequent allocation.
2676 				 */
2677 				folio_set_hugetlb_restore_reserve(folio);
2678 		} else {
2679 			/*
2680 			 * No reservation present, do nothing
2681 			 */
2682 			vma_end_reservation(h, vma, address);
2683 		}
2684 	}
2685 }
2686 
2687 /*
2688  * alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve
2689  * the old one
2690  * @old_folio: Old folio to dissolve
2691  * @list: List to isolate the page in case we need to
2692  * Returns 0 on success, otherwise negated error.
2693  */
2694 static int alloc_and_dissolve_hugetlb_folio(struct folio *old_folio,
2695 			struct list_head *list)
2696 {
2697 	gfp_t gfp_mask;
2698 	struct hstate *h;
2699 	int nid = folio_nid(old_folio);
2700 	struct folio *new_folio = NULL;
2701 	int ret = 0;
2702 
2703 retry:
2704 	/*
2705 	 * The old_folio might have been dissolved from under our feet, so make sure
2706 	 * to carefully check the state under the lock.
2707 	 */
2708 	spin_lock_irq(&hugetlb_lock);
2709 	if (!folio_test_hugetlb(old_folio)) {
2710 		/*
2711 		 * Freed from under us. Drop new_folio too.
2712 		 */
2713 		goto free_new;
2714 	} else if (folio_ref_count(old_folio)) {
2715 		bool isolated;
2716 
2717 		/*
2718 		 * Someone has grabbed the folio, try to isolate it here.
2719 		 * Fail with -EBUSY if not possible.
2720 		 */
2721 		spin_unlock_irq(&hugetlb_lock);
2722 		isolated = folio_isolate_hugetlb(old_folio, list);
2723 		ret = isolated ? 0 : -EBUSY;
2724 		spin_lock_irq(&hugetlb_lock);
2725 		goto free_new;
2726 	} else if (!folio_test_hugetlb_freed(old_folio)) {
2727 		/*
2728 		 * The folio's refcount is 0 but it has not been enqueued in the
2729 		 * freelist yet.  The race window is small, so we can succeed here if
2730 		 * we retry.
2731 		 */
2732 		spin_unlock_irq(&hugetlb_lock);
2733 		cond_resched();
2734 		goto retry;
2735 	} else {
2736 		h = folio_hstate(old_folio);
2737 		if (!new_folio) {
2738 			spin_unlock_irq(&hugetlb_lock);
2739 			gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
2740 			new_folio = alloc_fresh_hugetlb_folio(h, gfp_mask,
2741 							      nid, NULL);
2742 			if (!new_folio)
2743 				return -ENOMEM;
2744 			goto retry;
2745 		}
2746 
2747 		/*
2748 		 * Ok, old_folio is still a genuine free hugepage. Remove it from
2749 		 * the freelist and decrease the counters. These will be
2750 		 * incremented again when calling account_new_hugetlb_folio()
2751 		 * and enqueue_hugetlb_folio() for new_folio. The counters will
2752 		 * remain stable since this happens under the lock.
2753 		 */
2754 		remove_hugetlb_folio(h, old_folio, false);
2755 
2756 		/*
2757 		 * Ref count on new_folio is already zero as it was dropped
2758 		 * earlier.  It can be directly added to the pool free list.
2759 		 */
2760 		account_new_hugetlb_folio(h, new_folio);
2761 		enqueue_hugetlb_folio(h, new_folio);
2762 
2763 		/*
2764 		 * Folio has been replaced, we can safely free the old one.
2765 		 */
2766 		spin_unlock_irq(&hugetlb_lock);
2767 		update_and_free_hugetlb_folio(h, old_folio, false);
2768 	}
2769 
2770 	return ret;
2771 
2772 free_new:
2773 	spin_unlock_irq(&hugetlb_lock);
2774 	if (new_folio)
2775 		update_and_free_hugetlb_folio(h, new_folio, false);
2776 
2777 	return ret;
2778 }
2779 
2780 int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list)
2781 {
2782 	int ret = -EBUSY;
2783 
2784 	/* Not to disrupt normal path by vainly holding hugetlb_lock */
2785 	if (!folio_test_hugetlb(folio))
2786 		return 0;
2787 
2788 	/*
2789 	 * Fence off gigantic pages as there is a cyclic dependency between
2790 	 * alloc_contig_range and them. Return -ENOMEM as this has the effect
2791 	 * of bailing out right away without further retrying.
2792 	 */
2793 	if (order_is_gigantic(folio_order(folio)))
2794 		return -ENOMEM;
2795 
2796 	if (folio_ref_count(folio) && folio_isolate_hugetlb(folio, list))
2797 		ret = 0;
2798 	else if (!folio_ref_count(folio))
2799 		ret = alloc_and_dissolve_hugetlb_folio(folio, list);
2800 
2801 	return ret;
2802 }
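
/*
 * Illustrative sketch (hypothetical caller, e.g. a contiguous range
 * allocator as mentioned above): 0 means the folio was either isolated
 * onto @list for later migration or dissolved/replaced in place, so the
 * caller can keep scanning; -EBUSY and -ENOMEM mean giving up on this
 * range:
 *
 *	ret = isolate_or_dissolve_huge_folio(folio, &migrate_list);
 *	if (ret)
 *		goto fail;
 */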
2803 
2804 /*
2805  *  replace_free_hugepage_folios - Replace free hugepage folios in a given pfn
2806  *  range with new folios.
2807  *  @start_pfn: start pfn of the given pfn range
2808  *  @end_pfn: end pfn of the given pfn range
2809  *  Returns 0 on success, otherwise negated error.
2810  */
2811 int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn)
2812 {
2813 	unsigned long nr = 0;
2814 	struct page *page;
2815 	struct hstate *h;
2816 	LIST_HEAD(list);
2817 	int ret = 0;
2818 
2819 	/* Avoid pfn iterations if no free non-gigantic huge pages */
2820 	for_each_hstate(h) {
2821 		if (hstate_is_gigantic(h))
2822 			continue;
2823 
2824 		nr += h->free_huge_pages;
2825 		if (nr)
2826 			break;
2827 	}
2828 
2829 	if (!nr)
2830 		return 0;
2831 
2832 	while (start_pfn < end_pfn) {
2833 		page = pfn_to_page(start_pfn);
2834 		nr = 1;
2835 
2836 		if (PageHuge(page) || PageCompound(page)) {
2837 			struct folio *folio = page_folio(page);
2838 
2839 			nr = folio_nr_pages(folio) - folio_page_idx(folio, page);
2840 
2841 			/*
2842 			 * Don't disrupt normal path by vainly holding
2843 			 * hugetlb_lock
2844 			 */
2845 			if (folio_test_hugetlb(folio) && !folio_ref_count(folio)) {
2846 				if (order_is_gigantic(folio_order(folio))) {
2847 					ret = -ENOMEM;
2848 					break;
2849 				}
2850 
2851 				ret = alloc_and_dissolve_hugetlb_folio(folio, &list);
2852 				if (ret)
2853 					break;
2854 
2855 				putback_movable_pages(&list);
2856 			}
2857 		} else if (PageBuddy(page)) {
2858 			/*
2859 			 * Checking the buddy order without the zone lock is unsafe
2860 			 * and the order may be invalid, but the race window should
2861 			 * be small, and the worst case is skipping a free hugetlb page.
2862 			 */
2863 			const unsigned int order = buddy_order_unsafe(page);
2864 
2865 			if (order <= MAX_PAGE_ORDER)
2866 				nr = 1UL << order;
2867 		}
2868 		start_pfn += nr;
2869 	}
2870 
2871 	return ret;
2872 }
2873 
2874 void wait_for_freed_hugetlb_folios(void)
2875 {
2876 	if (llist_empty(&hpage_freelist))
2877 		return;
2878 
2879 	flush_work(&free_hpage_work);
2880 }
2881 
2882 typedef enum {
2883 	/*
2884 	 * For either 0/1: we checked the per-vma resv map, and one resv
2885 	 * count either can be reused (0), or an extra needed (1).
2886 	 * count either can be reused (0), or an extra one is needed (1).
2887 	MAP_CHG_REUSE = 0,
2888 	MAP_CHG_NEEDED = 1,
2889 	/*
2890 	 * The per-vma resv count cannot be used, hence a new resv
2891 	 * count is enforced.
2892 	 *
2893 	 * NOTE: This is mostly identical to MAP_CHG_NEEDED, except
2894 	 * that currently vma_needs_reservation() has an unwanted side
2895 	 * effect to either use end() or commit() to complete the
2896 	 * transaction. Hence it needs to differentiate from NEEDED.
2897 	 */
2898 	MAP_CHG_ENFORCED = 2,
2899 } map_chg_state;
2900 
2901 /*
2902  * NOTE! "cow_from_owner" represents a very hacky usage only used in CoW
2903  * faults of hugetlb private mappings on top of a non-page-cache folio (in
2904  * which case even if there's a private vma resv map it won't cover such
2905  * allocation).  New call sites should (probably) never set it to true!!
2906  * When it's set, the allocation will bypass all vma level reservations.
2907  */
2908 struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
2909 				    unsigned long addr, bool cow_from_owner)
2910 {
2911 	struct hugepage_subpool *spool = subpool_vma(vma);
2912 	struct hstate *h = hstate_vma(vma);
2913 	struct folio *folio;
2914 	long retval, gbl_chg, gbl_reserve;
2915 	map_chg_state map_chg;
2916 	int ret, idx;
2917 	struct hugetlb_cgroup *h_cg = NULL;
2918 	gfp_t gfp = htlb_alloc_mask(h) | __GFP_RETRY_MAYFAIL;
2919 
2920 	idx = hstate_index(h);
2921 
2922 	/* Whether we need a separate per-vma reservation? */
2923 	if (cow_from_owner) {
2924 		/*
2925 		 * Special case!  Since it's a CoW on top of a reserved
2926 		 * page, the private resv map doesn't count.  So it cannot
2927 		 * consume the per-vma resv map even if it's reserved.
2928 		 */
2929 		map_chg = MAP_CHG_ENFORCED;
2930 	} else {
2931 		/*
2932 		 * Examine the region/reserve map to determine if the process
2933 		 * has a reservation for the page to be allocated.  A return
2934 		 * code of zero indicates a reservation exists (no change).
2935 		 */
2936 		retval = vma_needs_reservation(h, vma, addr);
2937 		if (retval < 0)
2938 			return ERR_PTR(-ENOMEM);
2939 		map_chg = retval ? MAP_CHG_NEEDED : MAP_CHG_REUSE;
2940 	}
2941 
2942 	/*
2943 	 * Whether we need a separate global reservation?
2944 	 *
2945 	 * Processes that did not create the mapping will have no
2946 	 * reserves as indicated by the region/reserve map. Check
2947 	 * that the allocation will not exceed the subpool limit.
2948 	 * Or if it can get one from the pool reservation directly.
2949 	 */
2950 	if (map_chg) {
2951 		gbl_chg = hugepage_subpool_get_pages(spool, 1);
2952 		if (gbl_chg < 0)
2953 			goto out_end_reservation;
2954 	} else {
2955 		/*
2956 		 * If we have the vma reservation ready, no need for extra
2957 		 * global reservation.
2958 		 */
2959 		gbl_chg = 0;
2960 	}
2961 
2962 	/*
2963 	 * If this allocation is not consuming a per-vma reservation,
2964 	 * charge the hugetlb cgroup now.
2965 	 */
2966 	if (map_chg) {
2967 		ret = hugetlb_cgroup_charge_cgroup_rsvd(
2968 			idx, pages_per_huge_page(h), &h_cg);
2969 		if (ret)
2970 			goto out_subpool_put;
2971 	}
2972 
2973 	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
2974 	if (ret)
2975 		goto out_uncharge_cgroup_reservation;
2976 
2977 	spin_lock_irq(&hugetlb_lock);
2978 	/*
2979 	 * gbl_chg is passed to indicate whether or not a page must be taken
2980 	 * from the global free pool (global change).  gbl_chg == 0 indicates
2981 	 * a reservation exists for the allocation.
2982 	 */
2983 	folio = dequeue_hugetlb_folio_vma(h, vma, addr, gbl_chg);
2984 	if (!folio) {
2985 		spin_unlock_irq(&hugetlb_lock);
2986 		folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
2987 		if (!folio)
2988 			goto out_uncharge_cgroup;
2989 		spin_lock_irq(&hugetlb_lock);
2990 		list_add(&folio->lru, &h->hugepage_activelist);
2991 		folio_ref_unfreeze(folio, 1);
2992 		/* Fall through */
2993 	}
2994 
2995 	/*
2996 	 * Either dequeued or buddy-allocated folio needs to add special
2997 	 * mark to the folio when it consumes a global reservation.
2998 	 */
2999 	if (!gbl_chg) {
3000 		folio_set_hugetlb_restore_reserve(folio);
3001 		h->resv_huge_pages--;
3002 	}
3003 
3004 	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio);
3005 	/* If allocation is not consuming a reservation, also store the
3006 	 * hugetlb_cgroup pointer on the page.
3007 	 */
3008 	if (map_chg) {
3009 		hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
3010 						  h_cg, folio);
3011 	}
3012 
3013 	spin_unlock_irq(&hugetlb_lock);
3014 
3015 	hugetlb_set_folio_subpool(folio, spool);
3016 
3017 	if (map_chg != MAP_CHG_ENFORCED) {
3018 		/* commit() is only needed if the map_chg is not enforced */
3019 		retval = vma_commit_reservation(h, vma, addr);
3020 		/*
3021 		 * Check for possible race conditions.  When a race happens, the
3022 		 * page was added to the reservation map between
3023 		 * vma_needs_reservation and vma_commit_reservation.
3024 		 * This indicates a race with hugetlb_reserve_pages.
3025 		 * Adjust for the subpool count incremented above AND
3026 		 * in hugetlb_reserve_pages for the same page.	Also,
3027 		 * the reservation count added in hugetlb_reserve_pages
3028 		 * no longer applies.
3029 		 */
3030 		if (unlikely(map_chg == MAP_CHG_NEEDED && retval == 0)) {
3031 			long rsv_adjust;
3032 
3033 			rsv_adjust = hugepage_subpool_put_pages(spool, 1);
3034 			hugetlb_acct_memory(h, -rsv_adjust);
3035 			spin_lock_irq(&hugetlb_lock);
3036 			hugetlb_cgroup_uncharge_folio_rsvd(
3037 			    hstate_index(h), pages_per_huge_page(h), folio);
3038 			spin_unlock_irq(&hugetlb_lock);
3039 		}
3040 	}
3041 
3042 	ret = mem_cgroup_charge_hugetlb(folio, gfp);
3043 	/*
3044 	 * Unconditionally increment NR_HUGETLB here. If it turns out that
3045 	 * mem_cgroup_charge_hugetlb failed, then immediately free the page and
3046 	 * decrement NR_HUGETLB.
3047 	 */
3048 	lruvec_stat_mod_folio(folio, NR_HUGETLB, pages_per_huge_page(h));
3049 
3050 	if (ret == -ENOMEM) {
3051 		free_huge_folio(folio);
3052 		return ERR_PTR(-ENOMEM);
3053 	}
3054 
3055 	return folio;
3056 
3057 out_uncharge_cgroup:
3058 	hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
3059 out_uncharge_cgroup_reservation:
3060 	if (map_chg)
3061 		hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h),
3062 						    h_cg);
3063 out_subpool_put:
3064 	/*
3065 	 * Put the page back to the subpool only if the subpool's rsv_hpages
3066 	 * quota was consumed during hugepage_subpool_get_pages.
3067 	 */
3068 	if (map_chg && !gbl_chg) {
3069 		gbl_reserve = hugepage_subpool_put_pages(spool, 1);
3070 		hugetlb_acct_memory(h, -gbl_reserve);
3071 	}
3072 
3073 
3074 out_end_reservation:
3075 	if (map_chg != MAP_CHG_ENFORCED)
3076 		vma_end_reservation(h, vma, addr);
3077 	return ERR_PTR(-ENOSPC);
3078 }
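
/*
 * Illustrative sketch (simplified, hypothetical fault-path caller;
 * "later_error" is a placeholder): the function returns an ERR_PTR() on
 * failure (-ENOMEM or -ENOSPC as above), and restore_reserve_on_error()
 * should be used if the folio is discarded on a later error path before
 * being mapped:
 *
 *	folio = alloc_hugetlb_folio(vma, addr, false);
 *	if (IS_ERR(folio))
 *		return PTR_ERR(folio);
 *	if (later_error) {
 *		restore_reserve_on_error(h, vma, addr, folio);
 *		folio_put(folio);
 *	}
 */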
3079 
3080 static __init void *alloc_bootmem(struct hstate *h, int nid, bool node_exact)
3081 {
3082 	struct huge_bootmem_page *m;
3083 	int listnode = nid;
3084 
3085 	if (hugetlb_early_cma(h))
3086 		m = hugetlb_cma_alloc_bootmem(h, &listnode, node_exact);
3087 	else {
3088 		if (node_exact)
3089 			m = memblock_alloc_exact_nid_raw(huge_page_size(h),
3090 				huge_page_size(h), 0,
3091 				MEMBLOCK_ALLOC_ACCESSIBLE, nid);
3092 		else {
3093 			m = memblock_alloc_try_nid_raw(huge_page_size(h),
3094 				huge_page_size(h), 0,
3095 				MEMBLOCK_ALLOC_ACCESSIBLE, nid);
3096 			/*
3097 			 * For pre-HVO to work correctly, pages need to be on
3098 			 * the list for the node they were actually allocated
3099 			 * from. That node may be different in the case of
3100 			 * fallback by memblock_alloc_try_nid_raw. So,
3101 			 * extract the actual node first.
3102 			 */
3103 			if (m)
3104 				listnode = early_pfn_to_nid(PHYS_PFN(virt_to_phys(m)));
3105 		}
3106 
3107 		if (m) {
3108 			m->flags = 0;
3109 			m->cma = NULL;
3110 		}
3111 	}
3112 
3113 	if (m) {
3114 		/*
3115 		 * Use the beginning of the huge page to store the
3116 		 * huge_bootmem_page struct (until gather_bootmem
3117 		 * puts them into the mem_map).
3118 		 *
3119 		 * Put them into a private list first because mem_map
3120 		 * is not up yet.
3121 		 */
3122 		INIT_LIST_HEAD(&m->list);
3123 		list_add(&m->list, &huge_boot_pages[listnode]);
3124 		m->hstate = h;
3125 	}
3126 
3127 	return m;
3128 }
3129 
3130 int alloc_bootmem_huge_page(struct hstate *h, int nid)
3131 	__attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
3132 int __alloc_bootmem_huge_page(struct hstate *h, int nid)
3133 {
3134 	struct huge_bootmem_page *m = NULL; /* initialize for clang */
3135 	int nr_nodes, node = nid;
3136 
3137 	/* do node specific alloc */
3138 	if (nid != NUMA_NO_NODE) {
3139 		m = alloc_bootmem(h, node, true);
3140 		if (!m)
3141 			return 0;
3142 		goto found;
3143 	}
3144 
3145 	/* allocate from next node when distributing huge pages */
3146 	for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node,
3147 				    &hugetlb_bootmem_nodes) {
3148 		m = alloc_bootmem(h, node, false);
3149 		if (!m)
3150 			return 0;
3151 		goto found;
3152 	}
3153 
3154 found:
3155 
3156 	/*
3157 	 * Only initialize the head struct page in memmap_init_reserved_pages,
3158 	 * the rest of the struct pages will be initialized by the HugeTLB
3159 	 * subsystem itself.
3160 	 * The head struct page is used to get folio information by the HugeTLB
3161 	 * subsystem like zone id and node id.
3162 	 */
3163 	memblock_reserved_mark_noinit(virt_to_phys((void *)m + PAGE_SIZE),
3164 		huge_page_size(h) - PAGE_SIZE);
3165 
3166 	return 1;
3167 }
3168 
3169 /* Initialize [start_page:end_page_number] tail struct pages of a hugepage */
3170 static void __init hugetlb_folio_init_tail_vmemmap(struct folio *folio,
3171 					unsigned long start_page_number,
3172 					unsigned long end_page_number)
3173 {
3174 	enum zone_type zone = folio_zonenum(folio);
3175 	int nid = folio_nid(folio);
3176 	struct page *page = folio_page(folio, start_page_number);
3177 	unsigned long head_pfn = folio_pfn(folio);
3178 	unsigned long pfn, end_pfn = head_pfn + end_page_number;
3179 
3180 	/*
3181 	 * As we marked all tail pages with memblock_reserved_mark_noinit(),
3182 	 * we must initialize them ourselves here.
3183 	 */
3184 	for (pfn = head_pfn + start_page_number; pfn < end_pfn; page++, pfn++) {
3185 		__init_single_page(page, pfn, zone, nid);
3186 		prep_compound_tail((struct page *)folio, pfn - head_pfn);
3187 		set_page_count(page, 0);
3188 	}
3189 }
3190 
3191 static void __init hugetlb_folio_init_vmemmap(struct folio *folio,
3192 					      struct hstate *h,
3193 					      unsigned long nr_pages)
3194 {
3195 	int ret;
3196 
3197 	/*
3198 	 * This is an open-coded prep_compound_page() whereby we avoid
3199 	 * walking pages twice by initializing/preparing+freezing them in the
3200 	 * same go.
3201 	 */
3202 	__folio_clear_reserved(folio);
3203 	__folio_set_head(folio);
3204 	ret = folio_ref_freeze(folio, 1);
3205 	VM_BUG_ON(!ret);
3206 	hugetlb_folio_init_tail_vmemmap(folio, 1, nr_pages);
3207 	prep_compound_head(&folio->page, huge_page_order(h));
3208 }
3209 
3210 static bool __init hugetlb_bootmem_page_prehvo(struct huge_bootmem_page *m)
3211 {
3212 	return m->flags & HUGE_BOOTMEM_HVO;
3213 }
3214 
3215 static bool __init hugetlb_bootmem_page_earlycma(struct huge_bootmem_page *m)
3216 {
3217 	return m->flags & HUGE_BOOTMEM_CMA;
3218 }
3219 
3220 /*
3221  * memblock-allocated pageblocks might not have the migrate type set
3222  * if marked with the 'noinit' flag. Set it to the default (MIGRATE_MOVABLE)
3223  * here, or MIGRATE_CMA if this was a page allocated through an early CMA
3224  * reservation.
3225  *
3226  * In case of vmemmap optimized folios, the tail vmemmap pages are mapped
3227  * read-only, but that's ok - for sparse vmemmap this does not write to
3228  * the page structure.
3229  */
3230 static void __init hugetlb_bootmem_init_migratetype(struct folio *folio,
3231 							  struct hstate *h)
3232 {
3233 	unsigned long nr_pages = pages_per_huge_page(h), i;
3234 
3235 	WARN_ON_ONCE(!pageblock_aligned(folio_pfn(folio)));
3236 
3237 	for (i = 0; i < nr_pages; i += pageblock_nr_pages) {
3238 		if (folio_test_hugetlb_cma(folio))
3239 			init_cma_pageblock(folio_page(folio, i));
3240 		else
3241 			init_pageblock_migratetype(folio_page(folio, i),
3242 					  MIGRATE_MOVABLE, false);
3243 	}
3244 }
3245 
3246 static void __init prep_and_add_bootmem_folios(struct hstate *h,
3247 					struct list_head *folio_list)
3248 {
3249 	unsigned long flags;
3250 	struct folio *folio, *tmp_f;
3251 
3252 	/* Send list for bulk vmemmap optimization processing */
3253 	hugetlb_vmemmap_optimize_bootmem_folios(h, folio_list);
3254 
3255 	list_for_each_entry_safe(folio, tmp_f, folio_list, lru) {
3256 		if (!folio_test_hugetlb_vmemmap_optimized(folio)) {
3257 			/*
3258 			 * If HVO fails, initialize all tail struct pages.
3259 			 * We do not worry about potential long lock hold
3260 			 * time as this is early in boot and there should
3261 			 * be no contention.
3262 			 */
3263 			hugetlb_folio_init_tail_vmemmap(folio,
3264 					HUGETLB_VMEMMAP_RESERVE_PAGES,
3265 					pages_per_huge_page(h));
3266 		}
3267 		hugetlb_bootmem_init_migratetype(folio, h);
3268 		/* Subdivide locks to achieve better parallel performance */
3269 		spin_lock_irqsave(&hugetlb_lock, flags);
3270 		account_new_hugetlb_folio(h, folio);
3271 		enqueue_hugetlb_folio(h, folio);
3272 		spin_unlock_irqrestore(&hugetlb_lock, flags);
3273 	}
3274 }
3275 
3276 bool __init hugetlb_bootmem_page_zones_valid(int nid,
3277 					     struct huge_bootmem_page *m)
3278 {
3279 	unsigned long start_pfn;
3280 	bool valid;
3281 
3282 	if (m->flags & HUGE_BOOTMEM_ZONES_VALID) {
3283 		/*
3284 		 * Already validated, skip check.
3285 		 */
3286 		return true;
3287 	}
3288 
3289 	if (hugetlb_bootmem_page_earlycma(m)) {
3290 		valid = cma_validate_zones(m->cma);
3291 		goto out;
3292 	}
3293 
3294 	start_pfn = virt_to_phys(m) >> PAGE_SHIFT;
3295 
3296 	valid = !pfn_range_intersects_zones(nid, start_pfn,
3297 			pages_per_huge_page(m->hstate));
3298 out:
3299 	if (!valid)
3300 		hstate_boot_nrinvalid[hstate_index(m->hstate)]++;
3301 
3302 	return valid;
3303 }
3304 
3305 /*
3306  * Free a bootmem page that was found to be invalid (intersecting with
3307  * multiple zones).
3308  *
3309  * Since it intersects with multiple zones, we can't just do a free
3310  * operation on all pages at once, but instead have to walk all
3311  * pages, freeing them one by one.
3312  */
3313 static void __init hugetlb_bootmem_free_invalid_page(int nid, struct page *page,
3314 					     struct hstate *h)
3315 {
3316 	unsigned long npages = pages_per_huge_page(h);
3317 	unsigned long pfn;
3318 
3319 	while (npages--) {
3320 		pfn = page_to_pfn(page);
3321 		__init_page_from_nid(pfn, nid);
3322 		free_reserved_page(page);
3323 		page++;
3324 	}
3325 }
3326 
3327 /*
3328  * Put bootmem huge pages into the standard lists after mem_map is up.
3329  * Note: This only applies to gigantic (order > MAX_PAGE_ORDER) pages.
3330  */
3331 static void __init gather_bootmem_prealloc_node(unsigned long nid)
3332 {
3333 	LIST_HEAD(folio_list);
3334 	struct huge_bootmem_page *m, *tm;
3335 	struct hstate *h = NULL, *prev_h = NULL;
3336 
3337 	list_for_each_entry_safe(m, tm, &huge_boot_pages[nid], list) {
3338 		struct page *page = virt_to_page(m);
3339 		struct folio *folio = (void *)page;
3340 
3341 		h = m->hstate;
3342 		if (!hugetlb_bootmem_page_zones_valid(nid, m)) {
3343 			/*
3344 			 * Can't use this page. Initialize the
3345 			 * page structures if that hasn't already
3346 			 * been done, and give them to the page
3347 			 * allocator.
3348 			 */
3349 			hugetlb_bootmem_free_invalid_page(nid, page, h);
3350 			continue;
3351 		}
3352 
3353 		/*
3354 		 * It is possible to have multiple huge page sizes (hstates)
3355 		 * in this list.  If so, process each size separately.
3356 		 */
3357 		if (h != prev_h && prev_h != NULL)
3358 			prep_and_add_bootmem_folios(prev_h, &folio_list);
3359 		prev_h = h;
3360 
3361 		VM_BUG_ON(!hstate_is_gigantic(h));
3362 		WARN_ON(folio_ref_count(folio) != 1);
3363 
3364 		hugetlb_folio_init_vmemmap(folio, h,
3365 					   HUGETLB_VMEMMAP_RESERVE_PAGES);
3366 		init_new_hugetlb_folio(folio);
3367 
3368 		if (hugetlb_bootmem_page_prehvo(m))
3369 			/*
3370 			 * If pre-HVO was done, just set the
3371 			 * flag, the HVO code will then skip
3372 			 * this folio.
3373 			 */
3374 			folio_set_hugetlb_vmemmap_optimized(folio);
3375 
3376 		if (hugetlb_bootmem_page_earlycma(m))
3377 			folio_set_hugetlb_cma(folio);
3378 
3379 		list_add(&folio->lru, &folio_list);
3380 
3381 		/*
3382 		 * We need to restore the 'stolen' pages to totalram_pages
3383 		 * in order to fix confusing memory reports from free(1) and
3384 		 * other side-effects, like CommitLimit going negative.
3385 		 *
3386 		 * For CMA pages, this is done in init_cma_pageblock
3387 		 * (via hugetlb_bootmem_init_migratetype), so skip it here.
3388 		 */
3389 		if (!folio_test_hugetlb_cma(folio))
3390 			adjust_managed_page_count(page, pages_per_huge_page(h));
3391 		cond_resched();
3392 	}
3393 
3394 	prep_and_add_bootmem_folios(h, &folio_list);
3395 }
3396 
3397 static void __init gather_bootmem_prealloc_parallel(unsigned long start,
3398 						    unsigned long end, void *arg)
3399 {
3400 	int nid;
3401 
3402 	for (nid = start; nid < end; nid++)
3403 		gather_bootmem_prealloc_node(nid);
3404 }
3405 
3406 static void __init gather_bootmem_prealloc(void)
3407 {
3408 	struct padata_mt_job job = {
3409 		.thread_fn	= gather_bootmem_prealloc_parallel,
3410 		.fn_arg		= NULL,
3411 		.start		= 0,
3412 		.size		= nr_node_ids,
3413 		.align		= 1,
3414 		.min_chunk	= 1,
3415 		.max_threads	= num_node_state(N_MEMORY),
3416 		.numa_aware	= true,
3417 	};
3418 
3419 	padata_do_multithreaded(&job);
3420 }
3421 
3422 static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
3423 {
3424 	unsigned long i;
3425 	char buf[32];
3426 	LIST_HEAD(folio_list);
3427 
3428 	for (i = 0; i < h->max_huge_pages_node[nid]; ++i) {
3429 		if (hstate_is_gigantic(h)) {
3430 			if (!alloc_bootmem_huge_page(h, nid))
3431 				break;
3432 		} else {
3433 			struct folio *folio;
3434 			gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
3435 
3436 			folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid,
3437 					&node_states[N_MEMORY], NULL);
3438 			if (!folio && !list_empty(&folio_list) &&
3439 			    hugetlb_vmemmap_optimizable_size(h)) {
3440 				prep_and_add_allocated_folios(h, &folio_list);
3441 				INIT_LIST_HEAD(&folio_list);
3442 				folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid,
3443 						&node_states[N_MEMORY], NULL);
3444 			}
3445 			if (!folio)
3446 				break;
3447 			list_add(&folio->lru, &folio_list);
3448 		}
3449 		cond_resched();
3450 	}
3451 
3452 	if (!list_empty(&folio_list))
3453 		prep_and_add_allocated_folios(h, &folio_list);
3454 
3455 	if (i == h->max_huge_pages_node[nid])
3456 		return;
3457 
3458 	string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3459 	pr_warn("HugeTLB: allocating %u of page size %s failed node%d.  Only allocated %lu hugepages.\n",
3460 		h->max_huge_pages_node[nid], buf, nid, i);
3461 	h->max_huge_pages -= (h->max_huge_pages_node[nid] - i);
3462 	h->max_huge_pages_node[nid] = i;
3463 }
3464 
3465 static bool __init hugetlb_hstate_alloc_pages_specific_nodes(struct hstate *h)
3466 {
3467 	int i;
3468 	bool node_specific_alloc = false;
3469 
3470 	for_each_online_node(i) {
3471 		if (h->max_huge_pages_node[i] > 0) {
3472 			hugetlb_hstate_alloc_pages_onenode(h, i);
3473 			node_specific_alloc = true;
3474 		}
3475 	}
3476 
3477 	return node_specific_alloc;
3478 }
3479 
3480 static void __init hugetlb_hstate_alloc_pages_errcheck(unsigned long allocated, struct hstate *h)
3481 {
3482 	if (allocated < h->max_huge_pages) {
3483 		char buf[32];
3484 
3485 		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3486 		pr_warn("HugeTLB: allocating %lu of page size %s failed.  Only allocated %lu hugepages.\n",
3487 			h->max_huge_pages, buf, allocated);
3488 		h->max_huge_pages = allocated;
3489 	}
3490 }
3491 
3492 static void __init hugetlb_pages_alloc_boot_node(unsigned long start, unsigned long end, void *arg)
3493 {
3494 	struct hstate *h = (struct hstate *)arg;
3495 	int i, num = end - start;
3496 	nodemask_t node_alloc_noretry;
3497 	LIST_HEAD(folio_list);
3498 	int next_node = first_online_node;
3499 
3500 	/* Bit mask controlling how hard we retry per-node allocations. */
3501 	nodes_clear(node_alloc_noretry);
3502 
3503 	for (i = 0; i < num; ++i) {
3504 		struct folio *folio;
3505 
3506 		if (hugetlb_vmemmap_optimizable_size(h) &&
3507 		    (si_mem_available() == 0) && !list_empty(&folio_list)) {
3508 			prep_and_add_allocated_folios(h, &folio_list);
3509 			INIT_LIST_HEAD(&folio_list);
3510 		}
3511 		folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
3512 						&node_alloc_noretry, &next_node);
3513 		if (!folio)
3514 			break;
3515 
3516 		list_move(&folio->lru, &folio_list);
3517 		cond_resched();
3518 	}
3519 
3520 	prep_and_add_allocated_folios(h, &folio_list);
3521 }
3522 
3523 static unsigned long __init hugetlb_gigantic_pages_alloc_boot(struct hstate *h)
3524 {
3525 	unsigned long i;
3526 
3527 	for (i = 0; i < h->max_huge_pages; ++i) {
3528 		if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
3529 			break;
3530 		cond_resched();
3531 	}
3532 
3533 	return i;
3534 }
3535 
3536 static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h)
3537 {
3538 	struct padata_mt_job job = {
3539 		.fn_arg		= h,
3540 		.align		= 1,
3541 		.numa_aware	= true
3542 	};
3543 
3544 	unsigned long jiffies_start;
3545 	unsigned long jiffies_end;
3546 	unsigned long remaining;
3547 
3548 	job.thread_fn	= hugetlb_pages_alloc_boot_node;
3549 
3550 	/*
3551 	 * job.max_threads is 25% of the available cpu threads by default.
3552 	 *
3553 	 * On large servers with terabytes of memory, huge page allocation
3554 	 * can consume a considerable amount of time.
3555 	 *
3556 	 * Tests below show how long it takes to allocate 1 TiB of memory with
3557 	 * 2MiB huge pages. Using more threads can significantly improve allocation time.
3558 	 *
3559 	 * +-----------------------+-------+-------+-------+-------+-------+
3560 	 * | threads               |   8   |   16  |   32  |   64  |   128 |
3561 	 * +-----------------------+-------+-------+-------+-------+-------+
3562 	 * | skylake      144 cpus |   44s |   22s |   16s |   19s |   20s |
3563 	 * | cascade lake 192 cpus |   39s |   20s |   11s |   10s |    9s |
3564 	 * +-----------------------+-------+-------+-------+-------+-------+
3565 	 */
3566 	if (hugepage_allocation_threads == 0) {
3567 		hugepage_allocation_threads = num_online_cpus() / 4;
3568 		hugepage_allocation_threads = max(hugepage_allocation_threads, 1);
3569 	}
3570 
3571 	job.max_threads	= hugepage_allocation_threads;
3572 
3573 	jiffies_start = jiffies;
3574 	do {
3575 		remaining = h->max_huge_pages - h->nr_huge_pages;
3576 
3577 		job.start     = h->nr_huge_pages;
3578 		job.size      = remaining;
3579 		job.min_chunk = remaining / hugepage_allocation_threads;
3580 		padata_do_multithreaded(&job);
3581 
3582 		if (h->nr_huge_pages == h->max_huge_pages)
3583 			break;
3584 
3585 		/*
3586 		 * Retry only if the vmemmap optimization might have been able to free
3587 		 * some memory back to the system.
3588 		 */
3589 		if (!hugetlb_vmemmap_optimizable(h))
3590 			break;
3591 
3592 		/* Continue if progress was made in last iteration */
3593 	} while (remaining != (h->max_huge_pages - h->nr_huge_pages));
3594 
3595 	jiffies_end = jiffies;
3596 
3597 	pr_info("HugeTLB: allocation took %dms with hugepage_allocation_threads=%ld\n",
3598 		jiffies_to_msecs(jiffies_end - jiffies_start),
3599 		hugepage_allocation_threads);
3600 
3601 	return h->nr_huge_pages;
3602 }
3603 
3604 /*
3605  * NOTE: this routine is called in different contexts for gigantic and
3606  * non-gigantic pages.
3607  * - For gigantic pages, this is called early in the boot process and
3608  *   pages are allocated from memblock or a similar early allocator.
3609  *   Gigantic pages are actually added to pools later with the routine
3610  *   gather_bootmem_prealloc.
3611  * - For non-gigantic pages, this is called later in the boot process after
3612  *   all of mm is up and functional.  Pages are allocated from buddy and
3613  *   then added to hugetlb pools.
3614  */
3615 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
3616 {
3617 	unsigned long allocated;
3618 
3619 	/*
3620 	 * Skip gigantic hugepages allocation if early CMA
3621 	 * reservations are not available.
3622 	 */
3623 	if (hstate_is_gigantic(h) && hugetlb_cma_total_size() &&
3624 	    !hugetlb_early_cma(h)) {
3625 		pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
3626 		return;
3627 	}
3628 
3629 	if (!h->max_huge_pages)
3630 		return;
3631 
3632 	/* do node specific alloc */
3633 	if (hugetlb_hstate_alloc_pages_specific_nodes(h))
3634 		return;
3635 
3636 	/* below will do all node balanced alloc */
3637 	if (hstate_is_gigantic(h))
3638 		allocated = hugetlb_gigantic_pages_alloc_boot(h);
3639 	else
3640 		allocated = hugetlb_pages_alloc_boot(h);
3641 
3642 	hugetlb_hstate_alloc_pages_errcheck(allocated, h);
3643 }
3644 
3645 static void __init hugetlb_init_hstates(void)
3646 {
3647 	struct hstate *h, *h2;
3648 
3649 	for_each_hstate(h) {
3650 		/*
3651 		 * Always reset to first_memory_node here, even if
3652 		 * next_nid_to_alloc was set before - we can't
3653 		 * reference hugetlb_bootmem_nodes after init, and
3654 		 * first_memory_node is right for all further allocations.
3655 		 */
3656 		h->next_nid_to_alloc = first_memory_node;
3657 		h->next_nid_to_free = first_memory_node;
3658 
3659 		/* oversize hugepages were init'ed in early boot */
3660 		if (!hstate_is_gigantic(h))
3661 			hugetlb_hstate_alloc_pages(h);
3662 
3663 		/*
3664 		 * Set demote order for each hstate.  Note that
3665 		 * h->demote_order is initially 0.
3666 		 * - We can not demote gigantic pages if runtime freeing
3667 		 *   is not supported, so skip this.
3668 		 * - If CMA allocation is possible, we can not demote
3669 		 *   HUGETLB_PAGE_ORDER or smaller size pages.
3670 		 */
3671 		if (hstate_is_gigantic_no_runtime(h))
3672 			continue;
3673 		if (hugetlb_cma_total_size() && h->order <= HUGETLB_PAGE_ORDER)
3674 			continue;
3675 		for_each_hstate(h2) {
3676 			if (h2 == h)
3677 				continue;
3678 			if (h2->order < h->order &&
3679 			    h2->order > h->demote_order)
3680 				h->demote_order = h2->order;
3681 		}
3682 	}
3683 }
3684 
3685 static void __init report_hugepages(void)
3686 {
3687 	struct hstate *h;
3688 	unsigned long nrinvalid;
3689 
3690 	for_each_hstate(h) {
3691 		char buf[32];
3692 
3693 		nrinvalid = hstate_boot_nrinvalid[hstate_index(h)];
3694 		h->max_huge_pages -= nrinvalid;
3695 
3696 		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3697 		pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n",
3698 			buf, h->nr_huge_pages);
3699 		if (nrinvalid)
3700 			pr_info("HugeTLB: %s page size: %lu invalid page%s discarded\n",
3701 					buf, nrinvalid, str_plural(nrinvalid));
3702 		pr_info("HugeTLB: %d KiB vmemmap can be freed for a %s page\n",
3703 			hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf);
3704 	}
3705 }
3706 
3707 #ifdef CONFIG_HIGHMEM
3708 static void try_to_free_low(struct hstate *h, unsigned long count,
3709 						nodemask_t *nodes_allowed)
3710 {
3711 	int i;
3712 	LIST_HEAD(page_list);
3713 
3714 	lockdep_assert_held(&hugetlb_lock);
3715 	if (hstate_is_gigantic(h))
3716 		return;
3717 
3718 	/*
3719 	 * Collect pages to be freed on a list, and free after dropping lock
3720 	 */
3721 	for_each_node_mask(i, *nodes_allowed) {
3722 		struct folio *folio, *next;
3723 		struct list_head *freel = &h->hugepage_freelists[i];
3724 		list_for_each_entry_safe(folio, next, freel, lru) {
3725 			if (count >= h->nr_huge_pages)
3726 				goto out;
3727 			if (folio_test_highmem(folio))
3728 				continue;
3729 			remove_hugetlb_folio(h, folio, false);
3730 			list_add(&folio->lru, &page_list);
3731 		}
3732 	}
3733 
3734 out:
3735 	spin_unlock_irq(&hugetlb_lock);
3736 	update_and_free_pages_bulk(h, &page_list);
3737 	spin_lock_irq(&hugetlb_lock);
3738 }
3739 #else
3740 static inline void try_to_free_low(struct hstate *h, unsigned long count,
3741 						nodemask_t *nodes_allowed)
3742 {
3743 }
3744 #endif
3745 
3746 /*
3747  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
3748  * balanced by operating on them in a round-robin fashion.
3749  * Returns 1 if an adjustment was made.
3750  */
3751 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
3752 				int delta)
3753 {
3754 	int nr_nodes, node;
3755 
3756 	lockdep_assert_held(&hugetlb_lock);
3757 	VM_BUG_ON(delta != -1 && delta != 1);
3758 
3759 	if (delta < 0) {
3760 		for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, nodes_allowed) {
3761 			if (h->surplus_huge_pages_node[node])
3762 				goto found;
3763 		}
3764 	} else {
3765 		for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
3766 			if (h->surplus_huge_pages_node[node] <
3767 					h->nr_huge_pages_node[node])
3768 				goto found;
3769 		}
3770 	}
3771 	return 0;
3772 
3773 found:
3774 	h->surplus_huge_pages += delta;
3775 	h->surplus_huge_pages_node[node] += delta;
3776 	return 1;
3777 }
3778 
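/*
 * Persistent huge pages are the pool pages that are not surplus, i.e. those
 * counted toward nr_hugepages rather than the overcommit (surplus) pool.
 */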
3779 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
3780 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
3781 			      nodemask_t *nodes_allowed)
3782 {
3783 	unsigned long persistent_free_count;
3784 	unsigned long min_count;
3785 	unsigned long allocated;
3786 	struct folio *folio;
3787 	LIST_HEAD(page_list);
3788 	NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
3789 
3790 	/*
3791 	 * Bit mask controlling how hard we retry per-node allocations.
3792 	 * If we can not allocate the bit mask, do not attempt to allocate
3793 	 * the requested huge pages.
3794 	 */
3795 	if (node_alloc_noretry)
3796 		nodes_clear(*node_alloc_noretry);
3797 	else
3798 		return -ENOMEM;
3799 
3800 	/*
3801 	 * resize_lock mutex prevents concurrent adjustments to number of
3802 	 * pages in hstate via the proc/sysfs interfaces.
3803 	 */
3804 	mutex_lock(&h->resize_lock);
3805 	flush_free_hpage_work(h);
3806 	spin_lock_irq(&hugetlb_lock);
3807 
3808 	/*
3809 	 * Check for a node specific request.
3810 	 * Changing node specific huge page count may require a corresponding
3811 	 * change to the global count.  In any case, the passed node mask
3812 	 * (nodes_allowed) will restrict alloc/free to the specified node.
3813 	 */
3814 	if (nid != NUMA_NO_NODE) {
3815 		unsigned long old_count = count;
3816 
3817 		count += persistent_huge_pages(h) -
3818 			 (h->nr_huge_pages_node[nid] -
3819 			  h->surplus_huge_pages_node[nid]);
3820 		/*
3821 		 * User may have specified a large count value which caused the
3822 		 * above calculation to overflow.  In this case, they wanted
3823 		 * to allocate as many huge pages as possible.  Set count to
3824 		 * largest possible value to align with their intention.
3825 		 */
3826 		if (count < old_count)
3827 			count = ULONG_MAX;
3828 	}
3829 
3830 	/*
3831 	 * Runtime allocation of gigantic pages depends on the capability for
3832 	 * large page range allocation.
3833 	 * If the system does not provide this feature, return an error when
3834 	 * the user tries to allocate gigantic pages but let the user free the
3835 	 * boottime allocated gigantic pages.
3836 	 */
3837 	if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
3838 		if (count > persistent_huge_pages(h)) {
3839 			spin_unlock_irq(&hugetlb_lock);
3840 			mutex_unlock(&h->resize_lock);
3841 			NODEMASK_FREE(node_alloc_noretry);
3842 			return -EINVAL;
3843 		}
3844 		/* Fall through to decrease pool */
3845 	}
3846 
3847 	/*
3848 	 * Increase the pool size
3849 	 * First take pages out of surplus state.  Then make up the
3850 	 * remaining difference by allocating fresh huge pages.
3851 	 *
3852 	 * We might race with alloc_surplus_hugetlb_folio() here and be unable
3853 	 * to convert a surplus huge page to a normal huge page. That is
3854 	 * not critical, though, it just means the overall size of the
3855 	 * pool might be one hugepage larger than it needs to be, but
3856 	 * within all the constraints specified by the sysctls.
3857 	 */
3858 	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
3859 		if (!adjust_pool_surplus(h, nodes_allowed, -1))
3860 			break;
3861 	}
3862 
3863 	allocated = 0;
3864 	while (count > (persistent_huge_pages(h) + allocated)) {
3865 		/*
3866 		 * If this allocation races such that we no longer need the
3867 		 * page, free_huge_folio will handle it by freeing the page
3868 		 * and reducing the surplus.
3869 		 */
3870 		spin_unlock_irq(&hugetlb_lock);
3871 
3872 		/* yield cpu to avoid soft lockup */
3873 		cond_resched();
3874 
3875 		folio = alloc_pool_huge_folio(h, nodes_allowed,
3876 						node_alloc_noretry,
3877 						&h->next_nid_to_alloc);
3878 		if (!folio) {
3879 			prep_and_add_allocated_folios(h, &page_list);
3880 			spin_lock_irq(&hugetlb_lock);
3881 			goto out;
3882 		}
3883 
3884 		list_add(&folio->lru, &page_list);
3885 		allocated++;
3886 
3887 		/* Bail for signals. Probably ctrl-c from user */
3888 		if (signal_pending(current)) {
3889 			prep_and_add_allocated_folios(h, &page_list);
3890 			spin_lock_irq(&hugetlb_lock);
3891 			goto out;
3892 		}
3893 
3894 		spin_lock_irq(&hugetlb_lock);
3895 	}
3896 
3897 	/* Add allocated pages to the pool */
3898 	if (!list_empty(&page_list)) {
3899 		spin_unlock_irq(&hugetlb_lock);
3900 		prep_and_add_allocated_folios(h, &page_list);
3901 		spin_lock_irq(&hugetlb_lock);
3902 	}
3903 
3904 	/*
3905 	 * Decrease the pool size
3906 	 * First return free pages to the buddy allocator (being careful
3907 	 * to keep enough around to satisfy reservations).  Then place
3908 	 * pages into surplus state as needed so the pool will shrink
3909 	 * to the desired size as pages become free.
3910 	 *
3911 	 * By placing pages into the surplus state independent of the
3912 	 * overcommit value, we are allowing the surplus pool size to
3913 	 * exceed overcommit. There are few sane options here. Since
3914 	 * alloc_surplus_hugetlb_folio() is checking the global counter,
3915 	 * though, we'll note that we're not allowed to exceed surplus
3916 	 * and won't grow the pool anywhere else. Not until one of the
3917 	 * sysctls are changed, or the surplus pages go out of use.
3918 	 *
3919 	 * min_count is the expected number of persistent pages; we
3920 	 * shouldn't calculate min_count by using
3921 	 * resv_huge_pages + persistent_huge_pages() - free_huge_pages,
3922 	 * because there may exist free surplus huge pages, and this will
3923 	 * lead to subtracting twice. Free surplus huge pages come from HVO
3924 	 * failing to restore vmemmap, see comments in the callers of
3925 	 * hugetlb_vmemmap_restore_folio(). Thus, we should calculate
3926 	 * persistent free count first.
3927 	 */
3928 	persistent_free_count = h->free_huge_pages;
3929 	if (h->free_huge_pages > persistent_huge_pages(h)) {
3930 		if (h->free_huge_pages > h->surplus_huge_pages)
3931 			persistent_free_count -= h->surplus_huge_pages;
3932 		else
3933 			persistent_free_count = 0;
3934 	}
3935 	min_count = h->resv_huge_pages + persistent_huge_pages(h) - persistent_free_count;
3936 	min_count = max(count, min_count);
3937 	try_to_free_low(h, min_count, nodes_allowed);
3938 
3939 	/*
3940 	 * Collect pages to be removed on list without dropping lock
3941 	 */
3942 	while (min_count < persistent_huge_pages(h)) {
3943 		folio = remove_pool_hugetlb_folio(h, nodes_allowed, 0);
3944 		if (!folio)
3945 			break;
3946 
3947 		list_add(&folio->lru, &page_list);
3948 	}
3949 	/* free the pages after dropping lock */
3950 	spin_unlock_irq(&hugetlb_lock);
3951 	update_and_free_pages_bulk(h, &page_list);
3952 	flush_free_hpage_work(h);
3953 	spin_lock_irq(&hugetlb_lock);
3954 
3955 	while (count < persistent_huge_pages(h)) {
3956 		if (!adjust_pool_surplus(h, nodes_allowed, 1))
3957 			break;
3958 	}
3959 out:
3960 	h->max_huge_pages = persistent_huge_pages(h);
3961 	spin_unlock_irq(&hugetlb_lock);
3962 	mutex_unlock(&h->resize_lock);
3963 
3964 	NODEMASK_FREE(node_alloc_noretry);
3965 
3966 	return 0;
3967 }
3968 
3969 static long demote_free_hugetlb_folios(struct hstate *src, struct hstate *dst,
3970 				       struct list_head *src_list)
3971 {
3972 	long rc;
3973 	struct folio *folio, *next;
3974 	LIST_HEAD(dst_list);
3975 	LIST_HEAD(ret_list);
3976 
3977 	rc = hugetlb_vmemmap_restore_folios(src, src_list, &ret_list);
3978 	list_splice_init(&ret_list, src_list);
3979 
3980 	/*
3981 	 * Taking target hstate mutex synchronizes with set_max_huge_pages.
3982 	 * Without the mutex, pages added to target hstate could be marked
3983 	 * as surplus.
3984 	 *
3985 	 * Note that we already hold src->resize_lock.  To prevent deadlock,
3986 	 * use the convention of always taking larger size hstate mutex first.
3987 	 */
3988 	mutex_lock(&dst->resize_lock);
3989 
3990 	list_for_each_entry_safe(folio, next, src_list, lru) {
3991 		int i;
3992 		bool cma;
3993 
3994 		if (folio_test_hugetlb_vmemmap_optimized(folio))
3995 			continue;
3996 
3997 		cma = folio_test_hugetlb_cma(folio);
3998 
3999 		list_del(&folio->lru);
4000 
4001 		split_page_owner(&folio->page, huge_page_order(src), huge_page_order(dst));
4002 		pgalloc_tag_split(folio, huge_page_order(src), huge_page_order(dst));
4003 
4004 		for (i = 0; i < pages_per_huge_page(src); i += pages_per_huge_page(dst)) {
4005 			struct page *page = folio_page(folio, i);
4006 			/* Careful: see __split_huge_page_tail() */
4007 			struct folio *new_folio = (struct folio *)page;
4008 
4009 			clear_compound_head(page);
4010 			prep_compound_page(page, dst->order);
4011 
4012 			new_folio->mapping = NULL;
4013 			init_new_hugetlb_folio(new_folio);
4014 			/* Copy the CMA flag so that it is freed correctly */
4015 			if (cma)
4016 				folio_set_hugetlb_cma(new_folio);
4017 			list_add(&new_folio->lru, &dst_list);
4018 		}
4019 	}
4020 
4021 	prep_and_add_allocated_folios(dst, &dst_list);
4022 
4023 	mutex_unlock(&dst->resize_lock);
4024 
4025 	return rc;
4026 }
4027 
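/*
 * Demote up to nr_to_demote free huge pages of the src hstate into pages of
 * src->demote_order (e.g. 1G pages into 2M pages on x86_64).  This is
 * typically reached from the per-hstate "demote" sysfs attribute.
 */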
4028 long demote_pool_huge_page(struct hstate *src, nodemask_t *nodes_allowed,
4029 			   unsigned long nr_to_demote)
4030 	__must_hold(&hugetlb_lock)
4031 {
4032 	int nr_nodes, node;
4033 	struct hstate *dst;
4034 	long rc = 0;
4035 	long nr_demoted = 0;
4036 
4037 	lockdep_assert_held(&hugetlb_lock);
4038 
4039 	/* We should never get here if no demote order */
4040 	if (!src->demote_order) {
4041 		pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n");
4042 		return -EINVAL;		/* internal error */
4043 	}
4044 	dst = size_to_hstate(PAGE_SIZE << src->demote_order);
4045 
4046 	for_each_node_mask_to_free(src, nr_nodes, node, nodes_allowed) {
4047 		LIST_HEAD(list);
4048 		struct folio *folio, *next;
4049 
4050 		list_for_each_entry_safe(folio, next, &src->hugepage_freelists[node], lru) {
4051 			if (folio_test_hwpoison(folio))
4052 				continue;
4053 
4054 			remove_hugetlb_folio(src, folio, false);
4055 			list_add(&folio->lru, &list);
4056 
4057 			if (++nr_demoted == nr_to_demote)
4058 				break;
4059 		}
4060 
4061 		spin_unlock_irq(&hugetlb_lock);
4062 
4063 		rc = demote_free_hugetlb_folios(src, dst, &list);
4064 
4065 		spin_lock_irq(&hugetlb_lock);
4066 
4067 		list_for_each_entry_safe(folio, next, &list, lru) {
4068 			list_del(&folio->lru);
4069 			add_hugetlb_folio(src, folio, false);
4070 
4071 			nr_demoted--;
4072 		}
4073 
4074 		if (rc < 0 || nr_demoted == nr_to_demote)
4075 			break;
4076 	}
4077 
4078 	/*
4079 	 * Not absolutely necessary, but for consistency update max_huge_pages
4080 	 * based on pool changes for the demoted page.
4081 	 */
4082 	src->max_huge_pages -= nr_demoted;
4083 	dst->max_huge_pages += nr_demoted << (huge_page_order(src) - huge_page_order(dst));
4084 
4085 	if (rc < 0)
4086 		return rc;
4087 
4088 	if (nr_demoted)
4089 		return nr_demoted;
4090 	/*
4091 	 * Only way to get here is if all pages on free lists are poisoned.
4092 	 * Return -EBUSY so that caller will not retry.
4093 	 */
4094 	return -EBUSY;
4095 }
4096 
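/*
 * Common handler for resizing the persistent huge page pool.  This backs,
 * for example, the following user-visible interfaces (the actual callers
 * live in the sysctl/sysfs glue):
 *
 *	echo 1024 > /proc/sys/vm/nr_hugepages
 *	echo 512  > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 */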
4097 ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
4098 					   struct hstate *h, int nid,
4099 					   unsigned long count, size_t len)
4100 {
4101 	int err;
4102 	nodemask_t nodes_allowed, *n_mask;
4103 
4104 	if (hstate_is_gigantic_no_runtime(h))
4105 		return -EINVAL;
4106 
4107 	if (nid == NUMA_NO_NODE) {
4108 		/*
4109 		 * global hstate attribute
4110 		 */
4111 		if (!(obey_mempolicy &&
4112 				init_nodemask_of_mempolicy(&nodes_allowed)))
4113 			n_mask = &node_states[N_MEMORY];
4114 		else
4115 			n_mask = &nodes_allowed;
4116 	} else {
4117 		/*
4118 		 * Node specific request.  count adjustment happens in
4119 		 * set_max_huge_pages() after acquiring hugetlb_lock.
4120 		 */
4121 		init_nodemask_of_node(&nodes_allowed, nid);
4122 		n_mask = &nodes_allowed;
4123 	}
4124 
4125 	err = set_max_huge_pages(h, count, nid, n_mask);
4126 
4127 	return err ? err : len;
4128 }
4129 
4130 static int __init hugetlb_init(void)
4131 {
4132 	int i;
4133 
4134 	BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE <
4135 			__NR_HPAGEFLAGS);
4136 	BUILD_BUG_ON_INVALID(HUGETLB_PAGE_ORDER > MAX_FOLIO_ORDER);
4137 
4138 	if (!hugepages_supported()) {
4139 		if (hugetlb_max_hstate || default_hstate_max_huge_pages)
4140 			pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n");
4141 		return 0;
4142 	}
4143 
4144 	/*
4145 	 * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists.  Some
4146 	 * architectures depend on setup being done here.
4147 	 */
4148 	hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
4149 	if (!parsed_default_hugepagesz) {
4150 		/*
4151 		 * If we did not parse a default huge page size, set
4152 		 * default_hstate_idx to HPAGE_SIZE hstate. And, if the
4153 		 * number of huge pages for this default size was implicitly
4154 		 * specified, set that here as well.
4155 		 * Note that the implicit setting will overwrite an explicit
4156 		 * setting.  A warning will be printed in this case.
4157 		 */
4158 		default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE));
4159 		if (default_hstate_max_huge_pages) {
4160 			if (default_hstate.max_huge_pages) {
4161 				char buf[32];
4162 
4163 				string_get_size(huge_page_size(&default_hstate),
4164 					1, STRING_UNITS_2, buf, 32);
4165 				pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
4166 					default_hstate.max_huge_pages, buf);
4167 				pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n",
4168 					default_hstate_max_huge_pages);
4169 			}
4170 			default_hstate.max_huge_pages =
4171 				default_hstate_max_huge_pages;
4172 
4173 			for_each_online_node(i)
4174 				default_hstate.max_huge_pages_node[i] =
4175 					default_hugepages_in_node[i];
4176 		}
4177 	}
4178 
4179 	hugetlb_init_hstates();
4180 	gather_bootmem_prealloc();
4181 	report_hugepages();
4182 
4183 	hugetlb_sysfs_init();
4184 	hugetlb_cgroup_file_init();
4185 	hugetlb_sysctl_init();
4186 
4187 #ifdef CONFIG_SMP
4188 	num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
4189 #else
4190 	num_fault_mutexes = 1;
4191 #endif
4192 	hugetlb_fault_mutex_table =
4193 		kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
4194 			      GFP_KERNEL);
4195 	BUG_ON(!hugetlb_fault_mutex_table);
4196 
4197 	for (i = 0; i < num_fault_mutexes; i++)
4198 		mutex_init(&hugetlb_fault_mutex_table[i]);
4199 	return 0;
4200 }
4201 subsys_initcall(hugetlb_init);
4202 
4203 /* Overwritten by architectures with more huge page sizes */
4204 bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size)
4205 {
4206 	return size == HPAGE_SIZE;
4207 }
4208 
4209 void __init hugetlb_add_hstate(unsigned int order)
4210 {
4211 	struct hstate *h;
4212 	unsigned long i;
4213 
4214 	if (size_to_hstate(PAGE_SIZE << order)) {
4215 		return;
4216 	}
4217 	BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
4218 	BUG_ON(order < order_base_2(__NR_USED_SUBPAGE));
4219 	WARN_ON(order > MAX_FOLIO_ORDER);
4220 	h = &hstates[hugetlb_max_hstate++];
4221 	__mutex_init(&h->resize_lock, "resize mutex", &h->resize_key);
4222 	h->order = order;
4223 	h->mask = ~(huge_page_size(h) - 1);
4224 	for (i = 0; i < MAX_NUMNODES; ++i)
4225 		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
4226 	INIT_LIST_HEAD(&h->hugepage_activelist);
4227 	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
4228 					huge_page_size(h)/SZ_1K);
4229 
4230 	parsed_hstate = h;
4231 }
4232 
4233 bool __init __weak hugetlb_node_alloc_supported(void)
4234 {
4235 	return true;
4236 }
4237 
4238 static void __init hugepages_clear_pages_in_node(void)
4239 {
4240 	if (!hugetlb_max_hstate) {
4241 		default_hstate_max_huge_pages = 0;
4242 		memset(default_hugepages_in_node, 0,
4243 			sizeof(default_hugepages_in_node));
4244 	} else {
4245 		parsed_hstate->max_huge_pages = 0;
4246 		memset(parsed_hstate->max_huge_pages_node, 0,
4247 			sizeof(parsed_hstate->max_huge_pages_node));
4248 	}
4249 }
4250 
4251 static __init int hugetlb_add_param(char *s, int (*setup)(char *))
4252 {
4253 	size_t len;
4254 	char *p;
4255 
4256 	if (hugetlb_param_index >= HUGE_MAX_CMDLINE_ARGS)
4257 		return -EINVAL;
4258 
4259 	len = strlen(s) + 1;
4260 	if (len + hstate_cmdline_index > sizeof(hstate_cmdline_buf))
4261 		return -EINVAL;
4262 
4263 	p = &hstate_cmdline_buf[hstate_cmdline_index];
4264 	memcpy(p, s, len);
4265 	hstate_cmdline_index += len;
4266 
4267 	hugetlb_params[hugetlb_param_index].val = p;
4268 	hugetlb_params[hugetlb_param_index].setup = setup;
4269 
4270 	hugetlb_param_index++;
4271 
4272 	return 0;
4273 }
4274 
4275 static __init void hugetlb_parse_params(void)
4276 {
4277 	int i;
4278 	struct hugetlb_cmdline *hcp;
4279 
4280 	for (i = 0; i < hugetlb_param_index; i++) {
4281 		hcp = &hugetlb_params[i];
4282 
4283 		hcp->setup(hcp->val);
4284 	}
4285 
4286 	hugetlb_cma_validate_params();
4287 }
4288 
4289 /*
4290  * hugepages command line processing
4291  * hugepages normally follows a valid hugepagesz or default_hugepagesz
4292  * specification.  If not, ignore the hugepages value.  hugepages can also
4293  * be the first huge page command line option, in which case it implicitly
4294  * specifies the number of huge pages for the default size.
4295  */
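/*
 * Examples of accepted formats:
 *	hugepages=1024		- 1024 pages of the preceding (or default) size
 *	hugepages=0:512,1:512	- per-node counts, as <node>:<count>[,<node>:<count>...]
 */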
4296 static int __init hugepages_setup(char *s)
4297 {
4298 	unsigned long *mhp;
4299 	static unsigned long *last_mhp;
4300 	int node = NUMA_NO_NODE;
4301 	int count;
4302 	unsigned long tmp;
4303 	char *p = s;
4304 
4305 	if (!hugepages_supported()) {
4306 		pr_warn("HugeTLB: hugepages unsupported, ignoring hugepages=%s cmdline\n", s);
4307 		return 0;
4308 	}
4309 
4310 	if (!parsed_valid_hugepagesz) {
4311 		pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
4312 		parsed_valid_hugepagesz = true;
4313 		return -EINVAL;
4314 	}
4315 
4316 	/*
4317 	 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
4318 	 * yet, so this hugepages= parameter goes to the "default hstate".
4319 	 * Otherwise, it goes with the previously parsed hugepagesz or
4320 	 * default_hugepagesz.
4321 	 */
4322 	else if (!hugetlb_max_hstate)
4323 		mhp = &default_hstate_max_huge_pages;
4324 	else
4325 		mhp = &parsed_hstate->max_huge_pages;
4326 
4327 	if (mhp == last_mhp) {
4328 		pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s);
4329 		return 1;
4330 	}
4331 
4332 	while (*p) {
4333 		count = 0;
4334 		if (sscanf(p, "%lu%n", &tmp, &count) != 1)
4335 			goto invalid;
4336 		/* Parameter is node format */
4337 		if (p[count] == ':') {
4338 			if (!hugetlb_node_alloc_supported()) {
4339 				pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n");
4340 				return 1;
4341 			}
4342 			if (tmp >= MAX_NUMNODES || !node_online(tmp))
4343 				goto invalid;
4344 			node = array_index_nospec(tmp, MAX_NUMNODES);
4345 			p += count + 1;
4346 			/* Parse hugepages */
4347 			if (sscanf(p, "%lu%n", &tmp, &count) != 1)
4348 				goto invalid;
4349 			if (!hugetlb_max_hstate)
4350 				default_hugepages_in_node[node] = tmp;
4351 			else
4352 				parsed_hstate->max_huge_pages_node[node] = tmp;
4353 			*mhp += tmp;
4354 			/* Go to parse next node */
4355 			if (p[count] == ',')
4356 				p += count + 1;
4357 			else
4358 				break;
4359 		} else {
4360 			if (p != s)
4361 				goto invalid;
4362 			*mhp = tmp;
4363 			break;
4364 		}
4365 	}
4366 
4367 	last_mhp = mhp;
4368 
4369 	return 0;
4370 
4371 invalid:
4372 	pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p);
4373 	hugepages_clear_pages_in_node();
4374 	return -EINVAL;
4375 }
4376 hugetlb_early_param("hugepages", hugepages_setup);
4377 
4378 /*
4379  * hugepagesz command line processing
4380  * A specific huge page size can only be specified once with hugepagesz.
4381  * hugepagesz is followed by hugepages on the command line.  The global
4382  * variable 'parsed_valid_hugepagesz' is used to determine if prior
4383  * hugepagesz argument was valid.
4384  */
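/*
 * Example: "hugepagesz=1G hugepages=16" pre-allocates 16 gigantic pages at
 * boot (assuming the architecture supports a 1G huge page size).
 */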
4385 static int __init hugepagesz_setup(char *s)
4386 {
4387 	unsigned long size;
4388 	struct hstate *h;
4389 
4390 	if (!hugepages_supported()) {
4391 		pr_warn("HugeTLB: hugepages unsupported, ignoring hugepagesz=%s cmdline\n", s);
4392 		return 0;
4393 	}
4394 
4395 	parsed_valid_hugepagesz = false;
4396 	size = (unsigned long)memparse(s, NULL);
4397 
4398 	if (!arch_hugetlb_valid_size(size)) {
4399 		pr_err("HugeTLB: unsupported hugepagesz=%s\n", s);
4400 		return -EINVAL;
4401 	}
4402 
4403 	h = size_to_hstate(size);
4404 	if (h) {
4405 		/*
4406 		 * hstate for this size already exists.  This is normally
4407 		 * an error, but is allowed if the existing hstate is the
4408 		 * default hstate.  More specifically, it is only allowed if
4409 		 * the number of huge pages for the default hstate was not
4410 		 * previously specified.
4411 		 */
4412 		if (!parsed_default_hugepagesz ||  h != &default_hstate ||
4413 		    default_hstate.max_huge_pages) {
4414 			pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s);
4415 			return -EINVAL;
4416 		}
4417 
4418 		/*
4419 		 * No need to call hugetlb_add_hstate() as hstate already
4420 		 * exists.  But, do set parsed_hstate so that a following
4421 		 * hugepages= parameter will be applied to this hstate.
4422 		 */
4423 		parsed_hstate = h;
4424 		parsed_valid_hugepagesz = true;
4425 		return 0;
4426 	}
4427 
4428 	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
4429 	parsed_valid_hugepagesz = true;
4430 	return 0;
4431 }
4432 hugetlb_early_param("hugepagesz", hugepagesz_setup);
4433 
4434 /*
4435  * default_hugepagesz command line input
4436  * Only one instance of default_hugepagesz allowed on command line.
4437  */
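/*
 * Example: "default_hugepagesz=1G hugepages=8" selects 1G as the default
 * huge page size and reserves 8 pages of that size at boot.
 */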
4438 static int __init default_hugepagesz_setup(char *s)
4439 {
4440 	unsigned long size;
4441 	int i;
4442 
4443 	if (!hugepages_supported()) {
4444 		pr_warn("HugeTLB: hugepages unsupported, ignoring default_hugepagesz=%s cmdline\n",
4445 			s);
4446 		return 0;
4447 	}
4448 
4449 	parsed_valid_hugepagesz = false;
4450 	if (parsed_default_hugepagesz) {
4451 		pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);
4452 		return -EINVAL;
4453 	}
4454 
4455 	size = (unsigned long)memparse(s, NULL);
4456 
4457 	if (!arch_hugetlb_valid_size(size)) {
4458 		pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s);
4459 		return -EINVAL;
4460 	}
4461 
4462 	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
4463 	parsed_valid_hugepagesz = true;
4464 	parsed_default_hugepagesz = true;
4465 	default_hstate_idx = hstate_index(size_to_hstate(size));
4466 
4467 	/*
4468 	 * The number of default huge pages (for this size) could have been
4469 	 * specified as the first hugetlb parameter: hugepages=X.  If so,
4470 	 * then default_hstate_max_huge_pages is set.  If the default huge
4471 	 * page size is gigantic (> MAX_PAGE_ORDER), then the pages must be
4472 	 * allocated here from the bootmem allocator.
4473 	 */
4474 	if (default_hstate_max_huge_pages) {
4475 		default_hstate.max_huge_pages = default_hstate_max_huge_pages;
4476 		/*
4477 		 * Since this is an early parameter, we can't check
4478 		 * NUMA node state yet, so loop through MAX_NUMNODES.
4479 		 */
4480 		for (i = 0; i < MAX_NUMNODES; i++) {
4481 			if (default_hugepages_in_node[i] != 0)
4482 				default_hstate.max_huge_pages_node[i] =
4483 					default_hugepages_in_node[i];
4484 		}
4485 		default_hstate_max_huge_pages = 0;
4486 	}
4487 
4488 	return 0;
4489 }
4490 hugetlb_early_param("default_hugepagesz", default_hugepagesz_setup);
4491 
4492 void __init hugetlb_bootmem_set_nodes(void)
4493 {
4494 	int i, nid;
4495 	unsigned long start_pfn, end_pfn;
4496 
4497 	if (!nodes_empty(hugetlb_bootmem_nodes))
4498 		return;
4499 
4500 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
4501 		if (end_pfn > start_pfn)
4502 			node_set(nid, hugetlb_bootmem_nodes);
4503 	}
4504 }
4505 
4506 void __init hugetlb_bootmem_alloc(void)
4507 {
4508 	struct hstate *h;
4509 	int i;
4510 
4511 	hugetlb_bootmem_set_nodes();
4512 
4513 	for (i = 0; i < MAX_NUMNODES; i++)
4514 		INIT_LIST_HEAD(&huge_boot_pages[i]);
4515 
4516 	hugetlb_parse_params();
4517 
4518 	for_each_hstate(h) {
4519 		h->next_nid_to_alloc = first_online_node;
4520 
4521 		if (hstate_is_gigantic(h))
4522 			hugetlb_hstate_alloc_pages(h);
4523 	}
4524 }
4525 
4526 /*
4527  * hugepage_alloc_threads command line parsing.
4528  *
4529  * When set, use this specific number of threads for the boot
4530  * allocation of hugepages.
4531  */
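/*
 * Example: "hugepage_alloc_threads=16" on the kernel command line uses 16
 * threads instead of the default (25% of the online CPUs).
 */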
4532 static int __init hugepage_alloc_threads_setup(char *s)
4533 {
4534 	unsigned long allocation_threads;
4535 
4536 	if (kstrtoul(s, 0, &allocation_threads) != 0)
4537 		return 1;
4538 
4539 	if (allocation_threads == 0)
4540 		return 1;
4541 
4542 	hugepage_allocation_threads = allocation_threads;
4543 
4544 	return 1;
4545 }
4546 __setup("hugepage_alloc_threads=", hugepage_alloc_threads_setup);
4547 
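/*
 * Number of free huge pages on the nodes allowed by the current task's
 * cpuset and mempolicy; used by hugetlb_acct_memory() as a best-effort
 * availability check when charging reservations.
 */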
4548 static unsigned int allowed_mems_nr(struct hstate *h)
4549 {
4550 	int node;
4551 	unsigned int nr = 0;
4552 	nodemask_t *mbind_nodemask;
4553 	unsigned int *array = h->free_huge_pages_node;
4554 	gfp_t gfp_mask = htlb_alloc_mask(h);
4555 
4556 	mbind_nodemask = policy_mbind_nodemask(gfp_mask);
4557 	for_each_node_mask(node, cpuset_current_mems_allowed) {
4558 		if (!mbind_nodemask || node_isset(node, *mbind_nodemask))
4559 			nr += array[node];
4560 	}
4561 
4562 	return nr;
4563 }
4564 
4565 void hugetlb_report_meminfo(struct seq_file *m)
4566 {
4567 	struct hstate *h;
4568 	unsigned long total = 0;
4569 
4570 	if (!hugepages_supported())
4571 		return;
4572 
4573 	for_each_hstate(h) {
4574 		unsigned long count = h->nr_huge_pages;
4575 
4576 		total += huge_page_size(h) * count;
4577 
4578 		if (h == &default_hstate)
4579 			seq_printf(m,
4580 				   "HugePages_Total:   %5lu\n"
4581 				   "HugePages_Free:    %5lu\n"
4582 				   "HugePages_Rsvd:    %5lu\n"
4583 				   "HugePages_Surp:    %5lu\n"
4584 				   "Hugepagesize:   %8lu kB\n",
4585 				   count,
4586 				   h->free_huge_pages,
4587 				   h->resv_huge_pages,
4588 				   h->surplus_huge_pages,
4589 				   huge_page_size(h) / SZ_1K);
4590 	}
4591 
4592 	seq_printf(m, "Hugetlb:        %8lu kB\n", total / SZ_1K);
4593 }
4594 
4595 int hugetlb_report_node_meminfo(char *buf, int len, int nid)
4596 {
4597 	struct hstate *h = &default_hstate;
4598 
4599 	if (!hugepages_supported())
4600 		return 0;
4601 
4602 	return sysfs_emit_at(buf, len,
4603 			     "Node %d HugePages_Total: %5u\n"
4604 			     "Node %d HugePages_Free:  %5u\n"
4605 			     "Node %d HugePages_Surp:  %5u\n",
4606 			     nid, h->nr_huge_pages_node[nid],
4607 			     nid, h->free_huge_pages_node[nid],
4608 			     nid, h->surplus_huge_pages_node[nid]);
4609 }
4610 
4611 void hugetlb_show_meminfo_node(int nid)
4612 {
4613 	struct hstate *h;
4614 
4615 	if (!hugepages_supported())
4616 		return;
4617 
4618 	for_each_hstate(h)
4619 		printk("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
4620 			nid,
4621 			h->nr_huge_pages_node[nid],
4622 			h->free_huge_pages_node[nid],
4623 			h->surplus_huge_pages_node[nid],
4624 			huge_page_size(h) / SZ_1K);
4625 }
4626 
4627 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
4628 {
4629 	seq_printf(m, "HugetlbPages:\t%8lu kB\n",
4630 		   K(atomic_long_read(&mm->hugetlb_usage)));
4631 }
4632 
4633 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
4634 unsigned long hugetlb_total_pages(void)
4635 {
4636 	struct hstate *h;
4637 	unsigned long nr_total_pages = 0;
4638 
4639 	for_each_hstate(h)
4640 		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
4641 	return nr_total_pages;
4642 }
4643 
4644 static int hugetlb_acct_memory(struct hstate *h, long delta)
4645 {
4646 	int ret = -ENOMEM;
4647 
4648 	if (!delta)
4649 		return 0;
4650 
4651 	spin_lock_irq(&hugetlb_lock);
4652 	/*
4653 	 * When cpuset is configured, it breaks the strict hugetlb page
4654 	 * reservation as the accounting is done on a global variable. Such a
4655 	 * reservation is completely rubbish in the presence of cpuset because
4656 	 * the reservation is not checked against page availability for the
4657 	 * current cpuset. An application can still potentially be OOM'ed by
4658 	 * the kernel due to a lack of free hugetlb pages in the cpuset that the task is in.
4659 	 * Attempting to enforce strict accounting with cpuset is almost
4660 	 * impossible (or too ugly) because cpuset is so fluid that
4661 	 * tasks or memory nodes can be dynamically moved between cpusets.
4662 	 *
4663 	 * The change of semantics for shared hugetlb mapping with cpuset is
4664 	 * undesirable. However, in order to preserve some of the semantics,
4665 	 * we fall back to check against current free page availability as
4666 	 * a best attempt and hopefully to minimize the impact of changing
4667 	 * semantics that cpuset has.
4668 	 *
4669 	 * Apart from cpuset, we also have the memory policy mechanism that
4670 	 * determines from which node the kernel will allocate memory in a
4671 	 * NUMA system. So, similar to cpuset, we should also consider the
4672 	 * memory policy of the current task, for the same reasons described
4673 	 * above.
4674 	 */
4675 	if (delta > 0) {
4676 		if (gather_surplus_pages(h, delta) < 0)
4677 			goto out;
4678 
4679 		if (delta > allowed_mems_nr(h)) {
4680 			return_unused_surplus_pages(h, delta);
4681 			goto out;
4682 		}
4683 	}
4684 
4685 	ret = 0;
4686 	if (delta < 0)
4687 		return_unused_surplus_pages(h, (unsigned long) -delta);
4688 
4689 out:
4690 	spin_unlock_irq(&hugetlb_lock);
4691 	return ret;
4692 }
4693 
4694 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
4695 {
4696 	struct resv_map *resv = vma_resv_map(vma);
4697 
4698 	/*
4699 	 * HPAGE_RESV_OWNER indicates a private mapping.
4700 	 * This new VMA should share its siblings reservation map if present.
4701 	 * The VMA will only ever have a valid reservation map pointer where
4702 	 * it is being copied for another still existing VMA.  As that VMA
4703 	 * has a reference to the reservation map it cannot disappear until
4704 	 * after this open call completes.  It is therefore safe to take a
4705 	 * new reference here without additional locking.
4706 	 */
4707 	if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
4708 		resv_map_dup_hugetlb_cgroup_uncharge_info(resv);
4709 		kref_get(&resv->refs);
4710 	}
4711 
4712 	/*
4713 	 * vma_lock structure for sharable mappings is vma specific.
4714 	 * Clear old pointer (if copied via vm_area_dup) and allocate
4715 	 * new structure.  Before clearing, make sure vma_lock is not
4716 	 * for this vma.
4717 	 */
4718 	if (vma->vm_flags & VM_MAYSHARE) {
4719 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
4720 
4721 		if (vma_lock) {
4722 			if (vma_lock->vma != vma) {
4723 				vma->vm_private_data = NULL;
4724 				hugetlb_vma_lock_alloc(vma);
4725 			} else {
4726 				pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__);
4727 			}
4728 		} else {
4729 			hugetlb_vma_lock_alloc(vma);
4730 		}
4731 	}
4732 }
4733 
4734 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
4735 {
4736 	struct hstate *h = hstate_vma(vma);
4737 	struct resv_map *resv;
4738 	struct hugepage_subpool *spool = subpool_vma(vma);
4739 	unsigned long reserve, start, end;
4740 	long gbl_reserve;
4741 
4742 	hugetlb_vma_lock_free(vma);
4743 
4744 	resv = vma_resv_map(vma);
4745 	if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4746 		return;
4747 
4748 	start = vma_hugecache_offset(h, vma, vma->vm_start);
4749 	end = vma_hugecache_offset(h, vma, vma->vm_end);
4750 
4751 	reserve = (end - start) - region_count(resv, start, end);
4752 	hugetlb_cgroup_uncharge_counter(resv, start, end);
4753 	if (reserve) {
4754 		/*
4755 		 * Decrement reserve counts.  The global reserve count may be
4756 		 * adjusted if the subpool has a minimum size.
4757 		 */
4758 		gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
4759 		hugetlb_acct_memory(h, -gbl_reserve);
4760 	}
4761 
4762 	kref_put(&resv->refs, resv_map_release);
4763 }
4764 
4765 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
4766 {
4767 	if (addr & ~(huge_page_mask(hstate_vma(vma))))
4768 		return -EINVAL;
4769 	return 0;
4770 }
4771 
4772 void hugetlb_split(struct vm_area_struct *vma, unsigned long addr)
4773 {
4774 	/*
4775 	 * PMD sharing is only possible for PUD_SIZE-aligned address ranges
4776 	 * in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this
4777 	 * split, unshare PMDs in the PUD_SIZE interval surrounding addr now.
4778 	 * This function is called in the middle of a VMA split operation, with
4779 	 * MM, VMA and rmap all write-locked to prevent concurrent page table
4780 	 * walks (except hardware and gup_fast()).
4781 	 */
4782 	vma_assert_write_locked(vma);
4783 	i_mmap_assert_write_locked(vma->vm_file->f_mapping);
4784 
4785 	if (addr & ~PUD_MASK) {
4786 		unsigned long floor = addr & PUD_MASK;
4787 		unsigned long ceil = floor + PUD_SIZE;
4788 
4789 		if (floor >= vma->vm_start && ceil <= vma->vm_end) {
4790 			/*
4791 			 * Locking:
4792 			 * Use take_locks=false here.
4793 			 * The file rmap lock is already held.
4794 			 * The hugetlb VMA lock can't be taken when we already
4795 			 * hold the file rmap lock, and we don't need it because
4796 			 * its purpose is to synchronize against concurrent page
4797 			 * table walks, which are not possible thanks to the
4798 			 * locks held by our caller.
4799 			 */
4800 			hugetlb_unshare_pmds(vma, floor, ceil, /* take_locks = */ false);
4801 		}
4802 	}
4803 }
4804 
4805 static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
4806 {
4807 	return huge_page_size(hstate_vma(vma));
4808 }
4809 
4810 /*
4811  * We cannot handle pagefaults against hugetlb pages at all.  They cause
4812  * handle_mm_fault() to try to instantiate regular-sized pages in the
4813  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
4814  * this far.
4815  */
4816 static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
4817 {
4818 	BUG();
4819 	return 0;
4820 }
4821 
4822 /*
4823  * When a new function is introduced to vm_operations_struct and added
4824  * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
4825  * This is because under System V memory model, mappings created via
4826  * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
4827  * their original vm_ops are overwritten with shm_vm_ops.
4828  */
4829 const struct vm_operations_struct hugetlb_vm_ops = {
4830 	.fault = hugetlb_vm_op_fault,
4831 	.open = hugetlb_vm_op_open,
4832 	.close = hugetlb_vm_op_close,
4833 	.may_split = hugetlb_vm_op_split,
4834 	.pagesize = hugetlb_vm_op_pagesize,
4835 };
4836 
4837 static pte_t make_huge_pte(struct vm_area_struct *vma, struct folio *folio,
4838 		bool try_mkwrite)
4839 {
4840 	pte_t entry = folio_mk_pte(folio, vma->vm_page_prot);
4841 	unsigned int shift = huge_page_shift(hstate_vma(vma));
4842 
4843 	if (try_mkwrite && (vma->vm_flags & VM_WRITE)) {
4844 		entry = pte_mkwrite_novma(pte_mkdirty(entry));
4845 	} else {
4846 		entry = pte_wrprotect(entry);
4847 	}
4848 	entry = pte_mkyoung(entry);
4849 	entry = arch_make_huge_pte(entry, shift, vma->vm_flags);
4850 
4851 	return entry;
4852 }
4853 
4854 static void set_huge_ptep_writable(struct vm_area_struct *vma,
4855 				   unsigned long address, pte_t *ptep)
4856 {
4857 	pte_t entry;
4858 
4859 	entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(vma->vm_mm, address, ptep)));
4860 	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
4861 		update_mmu_cache(vma, address, ptep);
4862 }
4863 
4864 static void set_huge_ptep_maybe_writable(struct vm_area_struct *vma,
4865 					 unsigned long address, pte_t *ptep)
4866 {
4867 	if (vma->vm_flags & VM_WRITE)
4868 		set_huge_ptep_writable(vma, address, ptep);
4869 }
4870 
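/*
 * Install new_folio as a fresh anonymous hugetlb page at addr, preserving
 * the uffd-wp bit from the old pte.  Used by copy_hugetlb_page_range() after
 * an early copy-on-write during fork().
 */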
4871 static void
4872 hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
4873 		      struct folio *new_folio, pte_t old, unsigned long sz)
4874 {
4875 	pte_t newpte = make_huge_pte(vma, new_folio, true);
4876 
4877 	__folio_mark_uptodate(new_folio);
4878 	hugetlb_add_new_anon_rmap(new_folio, vma, addr);
4879 	if (userfaultfd_wp(vma) && huge_pte_uffd_wp(old))
4880 		newpte = huge_pte_mkuffd_wp(newpte);
4881 	set_huge_pte_at(vma->vm_mm, addr, ptep, newpte, sz);
4882 	hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm);
4883 	folio_set_hugetlb_migratable(new_folio);
4884 }
4885 
4886 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
4887 			    struct vm_area_struct *dst_vma,
4888 			    struct vm_area_struct *src_vma)
4889 {
4890 	pte_t *src_pte, *dst_pte, entry;
4891 	struct folio *pte_folio;
4892 	unsigned long addr;
4893 	bool cow = is_cow_mapping(src_vma->vm_flags);
4894 	struct hstate *h = hstate_vma(src_vma);
4895 	unsigned long sz = huge_page_size(h);
4896 	unsigned long npages = pages_per_huge_page(h);
4897 	struct mmu_notifier_range range;
4898 	unsigned long last_addr_mask;
4899 	softleaf_t softleaf;
4900 	int ret = 0;
4901 
4902 	if (cow) {
4903 		mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src,
4904 					src_vma->vm_start,
4905 					src_vma->vm_end);
4906 		mmu_notifier_invalidate_range_start(&range);
4907 		vma_assert_write_locked(src_vma);
4908 		raw_write_seqcount_begin(&src->write_protect_seq);
4909 	} else {
4910 		/*
4911 		 * For shared mappings the vma lock must be held before
4912 		 * calling hugetlb_walk() in the src vma. Otherwise, the
4913 		 * returned ptep could go away if part of a shared pmd and
4914 		 * another thread calls huge_pmd_unshare.
4915 		 */
4916 		hugetlb_vma_lock_read(src_vma);
4917 	}
4918 
4919 	last_addr_mask = hugetlb_mask_last_page(h);
4920 	for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) {
4921 		spinlock_t *src_ptl, *dst_ptl;
4922 		src_pte = hugetlb_walk(src_vma, addr, sz);
4923 		if (!src_pte) {
4924 			addr |= last_addr_mask;
4925 			continue;
4926 		}
4927 		dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz);
4928 		if (!dst_pte) {
4929 			ret = -ENOMEM;
4930 			break;
4931 		}
4932 
4933 #ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
4934 		/* If the pagetables are shared, there is nothing to do */
4935 		if (ptdesc_pmd_is_shared(virt_to_ptdesc(dst_pte))) {
4936 			addr |= last_addr_mask;
4937 			continue;
4938 		}
4939 #endif
4940 
4941 		dst_ptl = huge_pte_lock(h, dst, dst_pte);
4942 		src_ptl = huge_pte_lockptr(h, src, src_pte);
4943 		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
4944 		entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte);
4945 again:
4946 		if (huge_pte_none(entry)) {
4947 			/* Skip if src entry none. */
4948 			goto next;
4949 		}
4950 
4951 		softleaf = softleaf_from_pte(entry);
4952 		if (unlikely(softleaf_is_hwpoison(softleaf))) {
4953 			if (!userfaultfd_wp(dst_vma))
4954 				entry = huge_pte_clear_uffd_wp(entry);
4955 			set_huge_pte_at(dst, addr, dst_pte, entry, sz);
4956 		} else if (unlikely(softleaf_is_migration(softleaf))) {
4957 			bool uffd_wp = pte_swp_uffd_wp(entry);
4958 
4959 			if (!softleaf_is_migration_read(softleaf) && cow) {
4960 				/*
4961 				 * COW mappings require pages in both
4962 				 * parent and child to be set to read.
4963 				 */
4964 				softleaf = make_readable_migration_entry(
4965 							swp_offset(softleaf));
4966 				entry = swp_entry_to_pte(softleaf);
4967 				if (userfaultfd_wp(src_vma) && uffd_wp)
4968 					entry = pte_swp_mkuffd_wp(entry);
4969 				set_huge_pte_at(src, addr, src_pte, entry, sz);
4970 			}
4971 			if (!userfaultfd_wp(dst_vma))
4972 				entry = huge_pte_clear_uffd_wp(entry);
4973 			set_huge_pte_at(dst, addr, dst_pte, entry, sz);
4974 		} else if (unlikely(pte_is_marker(entry))) {
4975 			const pte_marker marker = copy_pte_marker(softleaf, dst_vma);
4976 
4977 			if (marker)
4978 				set_huge_pte_at(dst, addr, dst_pte,
4979 						make_pte_marker(marker), sz);
4980 		} else {
4981 			entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte);
4982 			pte_folio = page_folio(pte_page(entry));
4983 			folio_get(pte_folio);
4984 
4985 			/*
4986 			 * Failing to duplicate the anon rmap is a rare case
4987 			 * where we see pinned hugetlb pages while they're
4988 			 * prone to COW. We need to do the COW earlier during
4989 			 * fork.
4990 			 *
4991 			 * When pre-allocating the page or copying data, we
4992 			 * need to be without the pgtable locks since we could
4993 			 * sleep during the process.
4994 			 */
4995 			if (!folio_test_anon(pte_folio)) {
4996 				hugetlb_add_file_rmap(pte_folio);
4997 			} else if (hugetlb_try_dup_anon_rmap(pte_folio, src_vma)) {
4998 				pte_t src_pte_old = entry;
4999 				struct folio *new_folio;
5000 
5001 				spin_unlock(src_ptl);
5002 				spin_unlock(dst_ptl);
5003 				/* Do not use reserve as it's privately owned */
5004 				new_folio = alloc_hugetlb_folio(dst_vma, addr, false);
5005 				if (IS_ERR(new_folio)) {
5006 					folio_put(pte_folio);
5007 					ret = PTR_ERR(new_folio);
5008 					break;
5009 				}
5010 				ret = copy_user_large_folio(new_folio, pte_folio,
5011 							    addr, dst_vma);
5012 				folio_put(pte_folio);
5013 				if (ret) {
5014 					folio_put(new_folio);
5015 					break;
5016 				}
5017 
5018 				/* Install the new hugetlb folio if src pte stable */
5019 				dst_ptl = huge_pte_lock(h, dst, dst_pte);
5020 				src_ptl = huge_pte_lockptr(h, src, src_pte);
5021 				spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
5022 				entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte);
5023 				if (!pte_same(src_pte_old, entry)) {
5024 					restore_reserve_on_error(h, dst_vma, addr,
5025 								new_folio);
5026 					folio_put(new_folio);
5027 					/* huge_ptep of dst_pte won't change as in child */
5028 					goto again;
5029 				}
5030 				hugetlb_install_folio(dst_vma, dst_pte, addr,
5031 						      new_folio, src_pte_old, sz);
5032 				goto next;
5033 			}
5034 
5035 			if (cow) {
5036 				/*
5037 				 * No need to notify as we are downgrading page
5038 				 * table protection not changing it to point
5039 				 * to a new page.
5040 				 *
5041 				 * See Documentation/mm/mmu_notifier.rst
5042 				 */
5043 				huge_ptep_set_wrprotect(src, addr, src_pte);
5044 				entry = huge_pte_wrprotect(entry);
5045 			}
5046 
5047 			if (!userfaultfd_wp(dst_vma))
5048 				entry = huge_pte_clear_uffd_wp(entry);
5049 
5050 			set_huge_pte_at(dst, addr, dst_pte, entry, sz);
5051 			hugetlb_count_add(npages, dst);
5052 		}
5053 
5054 next:
5055 		spin_unlock(src_ptl);
5056 		spin_unlock(dst_ptl);
5057 	}
5058 
5059 	if (cow) {
5060 		raw_write_seqcount_end(&src->write_protect_seq);
5061 		mmu_notifier_invalidate_range_end(&range);
5062 	} else {
5063 		hugetlb_vma_unlock_read(src_vma);
5064 	}
5065 
5066 	return ret;
5067 }
5068 
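/*
 * Move a single huge PTE from old_addr to new_addr for mremap(), clearing
 * uffd-wp state when the VMA has userfaultfd registered without remap-event
 * support.
 */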
5069 static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
5070 			  unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte,
5071 			  unsigned long sz)
5072 {
5073 	bool need_clear_uffd_wp = vma_has_uffd_without_event_remap(vma);
5074 	struct hstate *h = hstate_vma(vma);
5075 	struct mm_struct *mm = vma->vm_mm;
5076 	spinlock_t *src_ptl, *dst_ptl;
5077 	pte_t pte;
5078 
5079 	dst_ptl = huge_pte_lock(h, mm, dst_pte);
5080 	src_ptl = huge_pte_lockptr(h, mm, src_pte);
5081 
5082 	/*
5083 	 * We don't have to worry about the ordering of src and dst ptlocks
5084 	 * because exclusive mmap_lock (or the i_mmap_lock) prevents deadlock.
5085 	 */
5086 	if (src_ptl != dst_ptl)
5087 		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
5088 
5089 	pte = huge_ptep_get_and_clear(mm, old_addr, src_pte, sz);
5090 
5091 	if (need_clear_uffd_wp && pte_is_uffd_wp_marker(pte)) {
5092 		huge_pte_clear(mm, new_addr, dst_pte, sz);
5093 	} else {
5094 		if (need_clear_uffd_wp) {
5095 			if (pte_present(pte))
5096 				pte = huge_pte_clear_uffd_wp(pte);
5097 			else
5098 				pte = pte_swp_clear_uffd_wp(pte);
5099 		}
5100 		set_huge_pte_at(mm, new_addr, dst_pte, pte, sz);
5101 	}
5102 
5103 	if (src_ptl != dst_ptl)
5104 		spin_unlock(src_ptl);
5105 	spin_unlock(dst_ptl);
5106 }
5107 
5108 int move_hugetlb_page_tables(struct vm_area_struct *vma,
5109 			     struct vm_area_struct *new_vma,
5110 			     unsigned long old_addr, unsigned long new_addr,
5111 			     unsigned long len)
5112 {
5113 	struct hstate *h = hstate_vma(vma);
5114 	struct address_space *mapping = vma->vm_file->f_mapping;
5115 	unsigned long sz = huge_page_size(h);
5116 	struct mm_struct *mm = vma->vm_mm;
5117 	unsigned long old_end = old_addr + len;
5118 	unsigned long last_addr_mask;
5119 	pte_t *src_pte, *dst_pte;
5120 	struct mmu_notifier_range range;
5121 	struct mmu_gather tlb;
5122 
5123 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, old_addr,
5124 				old_end);
5125 	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
5126 	/*
5127 	 * In case of shared PMDs, we should cover the maximum possible
5128 	 * range.
5129 	 */
5130 	flush_cache_range(vma, range.start, range.end);
5131 	tlb_gather_mmu_vma(&tlb, vma);
5132 
5133 	mmu_notifier_invalidate_range_start(&range);
5134 	last_addr_mask = hugetlb_mask_last_page(h);
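	/*
	 * Editor's note (hedged): hugetlb_mask_last_page() is expected to
	 * return the offset mask of the last huge page covered by a single
	 * page-table page, e.g. PUD_SIZE - PMD_SIZE for PMD-mapped huge
	 * pages. The "addr |= last_addr_mask" pattern below jumps to that
	 * last entry, so the loop's "addr += sz" step then skips the rest of
	 * a region whose page table is absent or was just unshared.
	 */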
5135 	/* Prevent race with file truncation */
5136 	hugetlb_vma_lock_write(vma);
5137 	i_mmap_lock_write(mapping);
5138 	for (; old_addr < old_end; old_addr += sz, new_addr += sz) {
5139 		src_pte = hugetlb_walk(vma, old_addr, sz);
5140 		if (!src_pte) {
5141 			old_addr |= last_addr_mask;
5142 			new_addr |= last_addr_mask;
5143 			continue;
5144 		}
5145 		if (huge_pte_none(huge_ptep_get(mm, old_addr, src_pte)))
5146 			continue;
5147 
5148 		if (huge_pmd_unshare(&tlb, vma, old_addr, src_pte)) {
5149 			old_addr |= last_addr_mask;
5150 			new_addr |= last_addr_mask;
5151 			continue;
5152 		}
5153 
5154 		dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz);
5155 		if (!dst_pte)
5156 			break;
5157 
5158 		move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte, sz);
5159 		tlb_remove_huge_tlb_entry(h, &tlb, src_pte, old_addr);
5160 	}
5161 
5162 	tlb_flush_mmu_tlbonly(&tlb);
5163 	huge_pmd_unshare_flush(&tlb, vma);
5164 
5165 	mmu_notifier_invalidate_range_end(&range);
5166 	i_mmap_unlock_write(mapping);
5167 	hugetlb_vma_unlock_write(vma);
5168 	tlb_finish_mmu(&tlb);
5169 
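	/*
	 * Hedged note: the expression below evaluates to len when the walk
	 * reached old_end, and to the (smaller) number of bytes processed
	 * when huge_pte_alloc() failed and the loop broke out early.
	 */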
5170 	return len + old_addr - old_end;
5171 }
5172 
5173 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
5174 			    unsigned long start, unsigned long end,
5175 			    struct folio *folio, zap_flags_t zap_flags)
5176 {
5177 	struct mm_struct *mm = vma->vm_mm;
5178 	const bool folio_provided = !!folio;
5179 	unsigned long address;
5180 	pte_t *ptep;
5181 	pte_t pte;
5182 	spinlock_t *ptl;
5183 	struct hstate *h = hstate_vma(vma);
5184 	unsigned long sz = huge_page_size(h);
5185 	bool adjust_reservation;
5186 	unsigned long last_addr_mask;
5187 
5188 	WARN_ON(!is_vm_hugetlb_page(vma));
5189 	BUG_ON(start & ~huge_page_mask(h));
5190 	BUG_ON(end & ~huge_page_mask(h));
5191 
5192 	/*
5193 	 * This is a hugetlb vma; all the pte entries should point
5194 	 * to a huge page.
5195 	 */
5196 	tlb_change_page_size(tlb, sz);
5197 	tlb_start_vma(tlb, vma);
5198 
5199 	last_addr_mask = hugetlb_mask_last_page(h);
5200 	address = start;
5201 	for (; address < end; address += sz) {
5202 		ptep = hugetlb_walk(vma, address, sz);
5203 		if (!ptep) {
5204 			address |= last_addr_mask;
5205 			continue;
5206 		}
5207 
5208 		ptl = huge_pte_lock(h, mm, ptep);
5209 		if (huge_pmd_unshare(tlb, vma, address, ptep)) {
5210 			spin_unlock(ptl);
5211 			address |= last_addr_mask;
5212 			continue;
5213 		}
5214 
5215 		pte = huge_ptep_get(mm, address, ptep);
5216 		if (huge_pte_none(pte)) {
5217 			spin_unlock(ptl);
5218 			continue;
5219 		}
5220 
5221 		/*
5222 		 * A migrating or HWPoisoned hugepage is already unmapped and
5223 		 * its refcount is dropped, so just clear the pte here.
5224 		 */
5225 		if (unlikely(!pte_present(pte))) {
5226 			/*
5227 			 * If the pte was wr-protected by uffd-wp in any of the
5228 			 * swap forms, meanwhile the caller does not want to
5229 			 * drop the uffd-wp bit in this zap, then replace the
5230 			 * pte with a marker.
5231 			 */
5232 			if (pte_swp_uffd_wp_any(pte) &&
5233 			    !(zap_flags & ZAP_FLAG_DROP_MARKER))
5234 				set_huge_pte_at(mm, address, ptep,
5235 						make_pte_marker(PTE_MARKER_UFFD_WP),
5236 						sz);
5237 			else
5238 				huge_pte_clear(mm, address, ptep, sz);
5239 			spin_unlock(ptl);
5240 			continue;
5241 		}
5242 
5243 		/*
5244 		 * If a folio is supplied, it is because a specific
5245 		 * folio is being unmapped, not a range. Ensure the folio we
5246 		 * are about to unmap is the actual folio of interest.
5247 		 */
5248 		if (folio_provided) {
5249 			if (folio != page_folio(pte_page(pte))) {
5250 				spin_unlock(ptl);
5251 				continue;
5252 			}
5253 			/*
5254 			 * Mark the VMA as having unmapped its page so that
5255 			 * future faults in this VMA will fail rather than
5256 			 * looking like data was lost
5257 			 */
5258 			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
5259 		} else {
5260 			folio = page_folio(pte_page(pte));
5261 		}
5262 
5263 		pte = huge_ptep_get_and_clear(mm, address, ptep, sz);
5264 		tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
5265 		if (huge_pte_dirty(pte))
5266 			folio_mark_dirty(folio);
5267 		/* Leave a uffd-wp pte marker if needed */
5268 		if (huge_pte_uffd_wp(pte) &&
5269 		    !(zap_flags & ZAP_FLAG_DROP_MARKER))
5270 			set_huge_pte_at(mm, address, ptep,
5271 					make_pte_marker(PTE_MARKER_UFFD_WP),
5272 					sz);
5273 		hugetlb_count_sub(pages_per_huge_page(h), mm);
5274 		hugetlb_remove_rmap(folio);
5275 		spin_unlock(ptl);
5276 
5277 		/*
5278 		 * Restore the reservation for an anonymous page, otherwise the
5279 		 * backing page could be stolen by someone.
5280 		 * If we are freeing a surplus, do not set the restore
5281 		 * reservation bit.
5282 		 */
5283 		adjust_reservation = false;
5284 
5285 		spin_lock_irq(&hugetlb_lock);
5286 		if (!h->surplus_huge_pages && __vma_private_lock(vma) &&
5287 		    folio_test_anon(folio)) {
5288 			folio_set_hugetlb_restore_reserve(folio);
5289 			/* Reservation to be adjusted after the spin lock */
5290 			adjust_reservation = true;
5291 		}
5292 		spin_unlock_irq(&hugetlb_lock);
5293 
5294 		/*
5295 		 * Adjust the reservation for the region that will have the
5296 		 * reserve restored. Keep in mind that vma_needs_reservation() changes
5297 		 * resv->adds_in_progress if it succeeds. If this is not done,
5298 		 * do_exit() will not see it, and will keep the reservation
5299 		 * forever.
5300 		 */
5301 		if (adjust_reservation) {
5302 			int rc = vma_needs_reservation(h, vma, address);
5303 
5304 			if (rc < 0)
5305 				/* Presumably allocate_file_region_entries failed
5306 				 * to allocate a file_region struct. Clear
5307 				 * hugetlb_restore_reserve so that global reserve
5308 				 * count will not be incremented by free_huge_folio.
5309 				 * Act as if we consumed the reservation.
5310 				 */
5311 				folio_clear_hugetlb_restore_reserve(folio);
5312 			else if (rc)
5313 				vma_add_reservation(h, vma, address);
5314 		}
5315 
5316 		tlb_remove_page_size(tlb, folio_page(folio, 0),
5317 				     folio_size(folio));
5318 		/*
5319 		 * If we were instructed to unmap a specific folio, we're done.
5320 		 */
5321 		if (folio_provided)
5322 			break;
5323 	}
5324 	tlb_end_vma(tlb, vma);
5325 
5326 	huge_pmd_unshare_flush(tlb, vma);
5327 }
5328 
5329 void __hugetlb_zap_begin(struct vm_area_struct *vma,
5330 			 unsigned long *start, unsigned long *end)
5331 {
5332 	if (!vma->vm_file)	/* hugetlbfs_file_mmap error */
5333 		return;
5334 
5335 	adjust_range_if_pmd_sharing_possible(vma, start, end);
5336 	hugetlb_vma_lock_write(vma);
5337 	if (vma->vm_file)
5338 		i_mmap_lock_write(vma->vm_file->f_mapping);
5339 }
5340 
5341 void __hugetlb_zap_end(struct vm_area_struct *vma,
5342 		       struct zap_details *details)
5343 {
5344 	zap_flags_t zap_flags = details ? details->zap_flags : 0;
5345 
5346 	if (!vma->vm_file)	/* hugetlbfs_file_mmap error */
5347 		return;
5348 
5349 	if (zap_flags & ZAP_FLAG_UNMAP) {	/* final unmap */
5350 		/*
5351 		 * Unlock and free the vma lock before releasing i_mmap_rwsem.
5352 		 * When the vma_lock is freed, this makes the vma ineligible
5353 		 * for pmd sharing.  And, i_mmap_rwsem is required to set up
5354 		 * pmd sharing.  This is important as page tables for this
5355 		 * unmapped range will be asynchronously deleted.  If the page
5356 		 * tables are shared, there will be issues when accessed by
5357 		 * someone else.
5358 		 */
5359 		__hugetlb_vma_unlock_write_free(vma);
5360 	} else {
5361 		hugetlb_vma_unlock_write(vma);
5362 	}
5363 
5364 	if (vma->vm_file)
5365 		i_mmap_unlock_write(vma->vm_file->f_mapping);
5366 }
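
/*
 * Illustrative call pattern (a sketch, not copied from a particular caller):
 * zap paths that may cover a hugetlb VMA are expected to bracket the actual
 * unmap roughly as follows, so the vma lock and i_mmap_rwsem are held across
 * the page table walk:
 *
 *	__hugetlb_zap_begin(vma, &start, &end);
 *	__unmap_hugepage_range(tlb, vma, start, end, NULL, zap_flags);
 *	__hugetlb_zap_end(vma, details);
 */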
5367 
5368 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
5369 			  unsigned long end, struct folio *folio,
5370 			  zap_flags_t zap_flags)
5371 {
5372 	struct mmu_notifier_range range;
5373 	struct mmu_gather tlb;
5374 
5375 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
5376 				start, end);
5377 	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
5378 	mmu_notifier_invalidate_range_start(&range);
5379 	tlb_gather_mmu(&tlb, vma->vm_mm);
5380 
5381 	__unmap_hugepage_range(&tlb, vma, start, end,
5382 			       folio, zap_flags);
5383 
5384 	mmu_notifier_invalidate_range_end(&range);
5385 	tlb_finish_mmu(&tlb);
5386 }
5387 
5388 /*
5389  * This is called when the original mapper is failing to COW a MAP_PRIVATE
5390  * mapping it owns the reserve page for. The intention is to unmap the page
5391  * from other VMAs and let the children be SIGKILLed if they are faulting the
5392  * same region.
5393  */
5394 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
5395 			      struct folio *folio, unsigned long address)
5396 {
5397 	struct hstate *h = hstate_vma(vma);
5398 	struct vm_area_struct *iter_vma;
5399 	struct address_space *mapping;
5400 	pgoff_t pgoff;
5401 
5402 	/*
5403 	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
5404 	 * from page cache lookup which is in HPAGE_SIZE units.
5405 	 */
5406 	address = address & huge_page_mask(h);
5407 	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
5408 			vma->vm_pgoff;
5409 	mapping = vma->vm_file->f_mapping;
5410 
5411 	/*
5412 	 * Take the mapping lock for the duration of the table walk. As
5413 	 * this mapping should be shared between all the VMAs,
5414 	 * __unmap_hugepage_range() is called as the lock is already held
5415 	 */
5416 	i_mmap_lock_write(mapping);
5417 	vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
5418 		/* Do not unmap the current VMA */
5419 		if (iter_vma == vma)
5420 			continue;
5421 
5422 		/*
5423 		 * Shared VMAs have their own reserves and do not affect
5424 		 * MAP_PRIVATE accounting but it is possible that a shared
5425 		 * VMA is using the same page so check and skip such VMAs.
5426 		 */
5427 		if (iter_vma->vm_flags & VM_MAYSHARE)
5428 			continue;
5429 
5430 		/*
5431 		 * Unmap the page from other VMAs without their own reserves.
5432 		 * They get marked to be SIGKILLed if they fault in these
5433 		 * areas. This is because a future no-page fault on this VMA
5434 		 * could insert a zeroed page instead of the data existing
5435 		 * from the time of fork. This would look like data corruption
5436 		 */
5437 		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
5438 			unmap_hugepage_range(iter_vma, address,
5439 					     address + huge_page_size(h),
5440 					     folio, 0);
5441 	}
5442 	i_mmap_unlock_write(mapping);
5443 }
5444 
5445 /*
5446  * hugetlb_wp() should be called with page lock of the original hugepage held.
5447  * Called with hugetlb_fault_mutex_table held and pte_page locked so we
5448  * cannot race with other handlers or page migration.
5449  * Keep the pte_same checks anyway to make transition from the mutex easier.
5450  */
5451 static vm_fault_t hugetlb_wp(struct vm_fault *vmf)
5452 {
5453 	struct vm_area_struct *vma = vmf->vma;
5454 	struct mm_struct *mm = vma->vm_mm;
5455 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
5456 	pte_t pte = huge_ptep_get(mm, vmf->address, vmf->pte);
5457 	struct hstate *h = hstate_vma(vma);
5458 	struct folio *old_folio;
5459 	struct folio *new_folio;
5460 	bool cow_from_owner = false;
5461 	vm_fault_t ret = 0;
5462 	struct mmu_notifier_range range;
5463 
5464 	/*
5465 	 * Never handle CoW for uffd-wp protected pages.  It should be only
5466 	 * handled when the uffd-wp protection is removed.
5467 	 *
5468 	 * Note that only the CoW optimization path (in hugetlb_no_page())
5469 	 * can trigger this, because hugetlb_fault() will always resolve
5470 	 * uffd-wp bit first.
5471 	 */
5472 	if (!unshare && huge_pte_uffd_wp(pte))
5473 		return 0;
5474 
5475 	/* Let's take out MAP_SHARED mappings first. */
5476 	if (vma->vm_flags & VM_MAYSHARE) {
5477 		set_huge_ptep_writable(vma, vmf->address, vmf->pte);
5478 		return 0;
5479 	}
5480 
5481 	old_folio = page_folio(pte_page(pte));
5482 
5483 	delayacct_wpcopy_start();
5484 
5485 retry_avoidcopy:
5486 	/*
5487 	 * If no-one else is actually using this page, we're the exclusive
5488 	 * owner and can reuse this page.
5489 	 *
5490 	 * Note that we don't rely on the (safer) folio refcount here, because
5491 	 * copying the hugetlb folio when there are unexpected (temporary)
5492 	 * folio references could harm simple fork()+exit() users when
5493 	 * we run out of free hugetlb folios: we would have to kill processes
5494 	 * in scenarios that used to work. As a side effect, there can still
5495 	 * be leaks between processes, for example, with FOLL_GET users.
5496 	 */
5497 	if (folio_mapcount(old_folio) == 1 && folio_test_anon(old_folio)) {
5498 		if (!PageAnonExclusive(&old_folio->page)) {
5499 			folio_move_anon_rmap(old_folio, vma);
5500 			SetPageAnonExclusive(&old_folio->page);
5501 		}
5502 		if (likely(!unshare))
5503 			set_huge_ptep_maybe_writable(vma, vmf->address,
5504 						     vmf->pte);
5505 
5506 		delayacct_wpcopy_end();
5507 		return 0;
5508 	}
5509 	VM_BUG_ON_PAGE(folio_test_anon(old_folio) &&
5510 		       PageAnonExclusive(&old_folio->page), &old_folio->page);
5511 
5512 	/*
5513 	 * If the process that created a MAP_PRIVATE mapping is about to perform
5514 	 * a COW due to a shared page count, attempt to satisfy the allocation
5515 	 * without using the existing reserves.
5516 	 * In order to determine whether this is a COW on a MAP_PRIVATE mapping it
5517 	 * is enough to check whether the old_folio is anonymous. This means that
5518 	 * the reserve for this address was consumed. If reserves were used, a
5519 	 * partially faulted mapping at the time of fork() could consume its reserves
5520 	 * on COW instead of the full address range.
5521 	 */
5522 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
5523 	    folio_test_anon(old_folio))
5524 		cow_from_owner = true;
5525 
5526 	folio_get(old_folio);
5527 
5528 	/*
5529 	 * Drop page table lock as buddy allocator may be called. It will
5530 	 * be acquired again before returning to the caller, as expected.
5531 	 */
5532 	spin_unlock(vmf->ptl);
5533 	new_folio = alloc_hugetlb_folio(vma, vmf->address, cow_from_owner);
5534 
5535 	if (IS_ERR(new_folio)) {
5536 		/*
5537 		 * If a process owning a MAP_PRIVATE mapping fails to COW,
5538 		 * it is due to references held by a child and an insufficient
5539 		 * huge page pool. To guarantee the original mapper's
5540 		 * reliability, unmap the page from child processes. The child
5541 		 * may get SIGKILLed if it later faults.
5542 		 */
5543 		if (cow_from_owner) {
5544 			struct address_space *mapping = vma->vm_file->f_mapping;
5545 			pgoff_t idx;
5546 			u32 hash;
5547 
5548 			folio_put(old_folio);
5549 			/*
5550 			 * Drop hugetlb_fault_mutex and vma_lock before
5551 			 * unmapping.  unmapping needs to hold vma_lock
5552 			 * in write mode.  Dropping vma_lock in read mode
5553 			 * here is OK as COW mappings do not interact with
5554 			 * PMD sharing.
5555 			 *
5556 			 * Reacquire both after unmap operation.
5557 			 */
5558 			idx = vma_hugecache_offset(h, vma, vmf->address);
5559 			hash = hugetlb_fault_mutex_hash(mapping, idx);
5560 			hugetlb_vma_unlock_read(vma);
5561 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5562 
5563 			unmap_ref_private(mm, vma, old_folio, vmf->address);
5564 
5565 			mutex_lock(&hugetlb_fault_mutex_table[hash]);
5566 			hugetlb_vma_lock_read(vma);
5567 			spin_lock(vmf->ptl);
5568 			vmf->pte = hugetlb_walk(vma, vmf->address,
5569 					huge_page_size(h));
5570 			if (likely(vmf->pte &&
5571 				   pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte)))
5572 				goto retry_avoidcopy;
5573 			/*
5574 			 * A race occurred while re-acquiring the page table
5575 			 * lock, and our job is done.
5576 			 */
5577 			delayacct_wpcopy_end();
5578 			return 0;
5579 		}
5580 
5581 		ret = vmf_error(PTR_ERR(new_folio));
5582 		goto out_release_old;
5583 	}
5584 
5585 	/*
5586 	 * When the original hugepage is a shared one, it does not have
5587 	 * an anon_vma prepared.
5588 	 */
5589 	ret = __vmf_anon_prepare(vmf);
5590 	if (unlikely(ret))
5591 		goto out_release_all;
5592 
5593 	if (copy_user_large_folio(new_folio, old_folio, vmf->real_address, vma)) {
5594 		ret = VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(hstate_index(h));
5595 		goto out_release_all;
5596 	}
5597 	__folio_mark_uptodate(new_folio);
5598 
5599 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, vmf->address,
5600 				vmf->address + huge_page_size(h));
5601 	mmu_notifier_invalidate_range_start(&range);
5602 
5603 	/*
5604 	 * Retake the page table lock to check for racing updates
5605 	 * before the page tables are altered
5606 	 */
5607 	spin_lock(vmf->ptl);
5608 	vmf->pte = hugetlb_walk(vma, vmf->address, huge_page_size(h));
5609 	if (likely(vmf->pte && pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte))) {
5610 		pte_t newpte = make_huge_pte(vma, new_folio, !unshare);
5611 
5612 		/* Break COW or unshare */
5613 		huge_ptep_clear_flush(vma, vmf->address, vmf->pte);
5614 		hugetlb_remove_rmap(old_folio);
5615 		hugetlb_add_new_anon_rmap(new_folio, vma, vmf->address);
5616 		if (huge_pte_uffd_wp(pte))
5617 			newpte = huge_pte_mkuffd_wp(newpte);
5618 		set_huge_pte_at(mm, vmf->address, vmf->pte, newpte,
5619 				huge_page_size(h));
5620 		folio_set_hugetlb_migratable(new_folio);
5621 		/* Make the old page be freed below */
5622 		new_folio = old_folio;
5623 	}
5624 	spin_unlock(vmf->ptl);
5625 	mmu_notifier_invalidate_range_end(&range);
5626 out_release_all:
5627 	/*
5628 	 * No restore in case of successful pagetable update (Break COW or
5629 	 * unshare)
5630 	 */
5631 	if (new_folio != old_folio)
5632 		restore_reserve_on_error(h, vma, vmf->address, new_folio);
5633 	folio_put(new_folio);
5634 out_release_old:
5635 	folio_put(old_folio);
5636 
5637 	spin_lock(vmf->ptl); /* Caller expects lock to be held */
5638 
5639 	delayacct_wpcopy_end();
5640 	return ret;
5641 }
5642 
5643 /*
5644  * Return whether there is a pagecache page to back the given address within the VMA.
5645  */
5646 bool hugetlbfs_pagecache_present(struct hstate *h,
5647 				 struct vm_area_struct *vma, unsigned long address)
5648 {
5649 	struct address_space *mapping = vma->vm_file->f_mapping;
5650 	pgoff_t idx = linear_page_index(vma, address);
5651 	struct folio *folio;
5652 
5653 	folio = filemap_get_folio(mapping, idx);
5654 	if (IS_ERR(folio))
5655 		return false;
5656 	folio_put(folio);
5657 	return true;
5658 }
5659 
5660 int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
5661 			   pgoff_t idx)
5662 {
5663 	struct inode *inode = mapping->host;
5664 	struct hstate *h = hstate_inode(inode);
5665 	int err;
5666 
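	/*
	 * Hedged note: the caller passes a huge-page-sized index; the page
	 * cache for hugetlbfs appears to be indexed in base-page units
	 * (compare hugetlbfs_pagecache_present(), which uses
	 * linear_page_index()), so scale the index by the huge page order
	 * before inserting.
	 */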
5667 	idx <<= huge_page_order(h);
5668 	__folio_set_locked(folio);
5669 	err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL);
5670 
5671 	if (unlikely(err)) {
5672 		__folio_clear_locked(folio);
5673 		return err;
5674 	}
5675 	folio_clear_hugetlb_restore_reserve(folio);
5676 
5677 	/*
5678 	 * mark folio dirty so that it will not be removed from cache/file
5679 	 * by non-hugetlbfs specific code paths.
5680 	 */
5681 	folio_mark_dirty(folio);
5682 
5683 	spin_lock(&inode->i_lock);
5684 	inode->i_blocks += blocks_per_huge_page(h);
5685 	spin_unlock(&inode->i_lock);
5686 	return 0;
5687 }
5688 
5689 static inline vm_fault_t hugetlb_handle_userfault(struct vm_fault *vmf,
5690 						  struct address_space *mapping,
5691 						  unsigned long reason)
5692 {
5693 	u32 hash;
5694 
5695 	/*
5696 	 * vma_lock and hugetlb_fault_mutex must be dropped before handling
5697 	 * userfault. Also, mmap_lock could be dropped while handling the
5698 	 * userfault, so any vma operation should be careful from here.
5699 	 */
5700 	hugetlb_vma_unlock_read(vmf->vma);
5701 	hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff);
5702 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5703 	return handle_userfault(vmf, reason);
5704 }
5705 
5706 /*
5707  * Recheck pte with pgtable lock.  Returns true if pte didn't change, or
5708  * false if pte changed or is changing.
5709  */
5710 static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm, unsigned long addr,
5711 			       pte_t *ptep, pte_t old_pte)
5712 {
5713 	spinlock_t *ptl;
5714 	bool same;
5715 
5716 	ptl = huge_pte_lock(h, mm, ptep);
5717 	same = pte_same(huge_ptep_get(mm, addr, ptep), old_pte);
5718 	spin_unlock(ptl);
5719 
5720 	return same;
5721 }
5722 
5723 static vm_fault_t hugetlb_no_page(struct address_space *mapping,
5724 			struct vm_fault *vmf)
5725 {
5726 	u32 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff);
5727 	bool new_folio, new_anon_folio = false;
5728 	struct vm_area_struct *vma = vmf->vma;
5729 	struct mm_struct *mm = vma->vm_mm;
5730 	struct hstate *h = hstate_vma(vma);
5731 	vm_fault_t ret = VM_FAULT_SIGBUS;
5732 	bool folio_locked = true;
5733 	struct folio *folio;
5734 	unsigned long size;
5735 	pte_t new_pte;
5736 
5737 	/*
5738 	 * Currently, we are forced to kill the process in the event the
5739 	 * original mapper has unmapped pages from the child due to a failed
5740 	 * COW/unsharing. Warn that such a situation has occurred as it may not
5741 	 * be obvious.
5742 	 */
5743 	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
5744 		pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
5745 			   current->pid);
5746 		goto out;
5747 	}
5748 
5749 	/*
5750 	 * Use page lock to guard against racing truncation
5751 	 * before we get page_table_lock.
5752 	 */
5753 	new_folio = false;
5754 	folio = filemap_lock_hugetlb_folio(h, mapping, vmf->pgoff);
5755 	if (IS_ERR(folio)) {
5756 		size = i_size_read(mapping->host) >> huge_page_shift(h);
5757 		if (vmf->pgoff >= size)
5758 			goto out;
5759 		/* Check for page in userfault range */
5760 		if (userfaultfd_missing(vma)) {
5761 			/*
5762 			 * Since hugetlb_no_page() was examining pte
5763 			 * without pgtable lock, we need to re-test under
5764 			 * lock because the pte may not be stable and could
5765 			 * have changed from under us.  Try to detect
5766 			 * either changed or during-changing ptes and retry
5767 			 * either changed or still-changing ptes and retry
5768 			 *
5769 			 * Note that userfaultfd is actually fine with
5770 			 * false positives (e.g. caused by pte changed),
5771 			 * but not wrong logical events (e.g. caused by
5772 			 * reading a pte during changing).  The latter can
5773 			 * confuse the userspace, so the strictness is very
5774 			 * much preferred.  E.g., MISSING event should
5775 			 * never happen on the page after UFFDIO_COPY has
5776 			 * correctly installed the page and returned.
5777 			 */
5778 			if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) {
5779 				ret = 0;
5780 				goto out;
5781 			}
5782 
5783 			return hugetlb_handle_userfault(vmf, mapping,
5784 							VM_UFFD_MISSING);
5785 		}
5786 
5787 		if (!(vma->vm_flags & VM_MAYSHARE)) {
5788 			ret = __vmf_anon_prepare(vmf);
5789 			if (unlikely(ret))
5790 				goto out;
5791 		}
5792 
5793 		folio = alloc_hugetlb_folio(vma, vmf->address, false);
5794 		if (IS_ERR(folio)) {
5795 			/*
5796 			 * Returning error will result in faulting task being
5797 			 * sent SIGBUS.  The hugetlb fault mutex prevents two
5798 			 * tasks from racing to fault in the same page which
5799 			 * could result in false unable to allocate errors.
5800 			 * Page migration does not take the fault mutex, but
5801 			 * does a clear then write of pte's under page table
5802 			 * lock.  Page fault code could race with migration,
5803 			 * notice the clear pte and try to allocate a page
5804 			 * here.  Before returning error, get ptl and make
5805 			 * sure there really is no pte entry.
5806 			 */
5807 			if (hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte))
5808 				ret = vmf_error(PTR_ERR(folio));
5809 			else
5810 				ret = 0;
5811 			goto out;
5812 		}
5813 		folio_zero_user(folio, vmf->real_address);
5814 		__folio_mark_uptodate(folio);
5815 		new_folio = true;
5816 
5817 		if (vma->vm_flags & VM_MAYSHARE) {
5818 			int err = hugetlb_add_to_page_cache(folio, mapping,
5819 							vmf->pgoff);
5820 			if (err) {
5821 				/*
5822 				 * err can't be -EEXIST which implies someone
5823 				 * else consumed the reservation since hugetlb
5824 				 * fault mutex is held when add a hugetlb page
5825 				 * fault mutex is held when adding a hugetlb page
5826 				 * restore_reserve_on_error() here.
5827 				 */
5828 				restore_reserve_on_error(h, vma, vmf->address,
5829 							folio);
5830 				folio_put(folio);
5831 				ret = VM_FAULT_SIGBUS;
5832 				goto out;
5833 			}
5834 		} else {
5835 			new_anon_folio = true;
5836 			folio_lock(folio);
5837 		}
5838 	} else {
5839 		/*
5840 		 * If a memory error occurs between mmap() and fault, some processes
5841 		 * don't have a hwpoisoned swap entry for the errored virtual address.
5842 		 * So we need to block the hugepage fault with a PG_hwpoison bit check.
5843 		 */
5844 		if (unlikely(folio_test_hwpoison(folio))) {
5845 			ret = VM_FAULT_HWPOISON_LARGE |
5846 				VM_FAULT_SET_HINDEX(hstate_index(h));
5847 			goto backout_unlocked;
5848 		}
5849 
5850 		/* Check for page in userfault range. */
5851 		if (userfaultfd_minor(vma)) {
5852 			folio_unlock(folio);
5853 			folio_put(folio);
5854 			/* See comment in userfaultfd_missing() block above */
5855 			if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) {
5856 				ret = 0;
5857 				goto out;
5858 			}
5859 			return hugetlb_handle_userfault(vmf, mapping,
5860 							VM_UFFD_MINOR);
5861 		}
5862 	}
5863 
5864 	/*
5865 	 * If we are going to COW a private mapping later, we examine the
5866 	 * pending reservations for this page now. This will ensure that
5867 	 * any allocations necessary to record that reservation occur outside
5868 	 * the spinlock.
5869 	 */
5870 	if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
5871 		if (vma_needs_reservation(h, vma, vmf->address) < 0) {
5872 			ret = VM_FAULT_OOM;
5873 			goto backout_unlocked;
5874 		}
5875 		/* Just decrements count, does not deallocate */
5876 		vma_end_reservation(h, vma, vmf->address);
5877 	}
5878 
5879 	vmf->ptl = huge_pte_lock(h, mm, vmf->pte);
5880 	ret = 0;
5881 	/* If pte changed from under us, retry */
5882 	if (!pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), vmf->orig_pte))
5883 		goto backout;
5884 
5885 	if (new_anon_folio)
5886 		hugetlb_add_new_anon_rmap(folio, vma, vmf->address);
5887 	else
5888 		hugetlb_add_file_rmap(folio);
5889 	new_pte = make_huge_pte(vma, folio, vma->vm_flags & VM_SHARED);
5890 	/*
5891 	 * If this pte was previously wr-protected, keep it wr-protected even
5892 	 * if populated.
5893 	 */
5894 	if (unlikely(pte_is_uffd_wp_marker(vmf->orig_pte)))
5895 		new_pte = huge_pte_mkuffd_wp(new_pte);
5896 	set_huge_pte_at(mm, vmf->address, vmf->pte, new_pte, huge_page_size(h));
5897 
5898 	hugetlb_count_add(pages_per_huge_page(h), mm);
5899 	if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
5900 		/*
5901 		 * No need to keep file folios locked. See comment in
5902 		 * hugetlb_fault().
5903 		 */
5904 		if (!new_anon_folio) {
5905 			folio_locked = false;
5906 			folio_unlock(folio);
5907 		}
5908 		/* Optimization, do the COW without a second fault */
5909 		ret = hugetlb_wp(vmf);
5910 	}
5911 
5912 	spin_unlock(vmf->ptl);
5913 
5914 	/*
5915 	 * Only set hugetlb_migratable in newly allocated pages.  Existing pages
5916 	 * found in the pagecache may not have hugetlb_migratable if they have
5917 	 * been isolated for migration.
5918 	 */
5919 	if (new_folio)
5920 		folio_set_hugetlb_migratable(folio);
5921 
5922 	if (folio_locked)
5923 		folio_unlock(folio);
5924 out:
5925 	hugetlb_vma_unlock_read(vma);
5926 
5927 	/*
5928 	 * We must check to release the per-VMA lock. __vmf_anon_prepare() is
5929 	 * the only way ret can be set to VM_FAULT_RETRY.
5930 	 */
5931 	if (unlikely(ret & VM_FAULT_RETRY))
5932 		vma_end_read(vma);
5933 
5934 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5935 	return ret;
5936 
5937 backout:
5938 	spin_unlock(vmf->ptl);
5939 backout_unlocked:
5940 	/* We only need to restore reservations for private mappings */
5941 	if (new_anon_folio)
5942 		restore_reserve_on_error(h, vma, vmf->address, folio);
5943 
5944 	folio_unlock(folio);
5945 	folio_put(folio);
5946 	goto out;
5947 }
5948 
5949 #ifdef CONFIG_SMP
5950 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
5951 {
5952 	unsigned long key[2];
5953 	u32 hash;
5954 
5955 	key[0] = (unsigned long) mapping;
5956 	key[1] = idx;
5957 
5958 	hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
5959 
5960 	return hash & (num_fault_mutexes - 1);
5961 }
5962 #else
5963 /*
5964  * For uniprocessor systems we always use a single mutex, so just
5965  * return 0 and avoid the hashing overhead.
5966  */
5967 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
5968 {
5969 	return 0;
5970 }
5971 #endif
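
/*
 * Illustrative usage (a sketch; the fault and UFFD paths in this file follow
 * this pattern to serialize faults on the same (mapping, index) pair):
 *
 *	hash = hugetlb_fault_mutex_hash(mapping, idx);
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	... fault handling / page cache insertion ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */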
5972 
5973 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
5974 			unsigned long address, unsigned int flags)
5975 {
5976 	vm_fault_t ret;
5977 	u32 hash;
5978 	struct folio *folio = NULL;
5979 	struct hstate *h = hstate_vma(vma);
5980 	struct address_space *mapping;
5981 	bool need_wait_lock = false;
5982 	struct vm_fault vmf = {
5983 		.vma = vma,
5984 		.address = address & huge_page_mask(h),
5985 		.real_address = address,
5986 		.flags = flags,
5987 		.pgoff = vma_hugecache_offset(h, vma,
5988 				address & huge_page_mask(h)),
5989 		/* TODO: Track hugetlb faults using vm_fault */
5990 
5991 		/*
5992 		 * Some fields may not be initialized, be careful as it may
5993 		 * be hard to debug if called functions make assumptions
5994 		 */
5995 	};
5996 
5997 	/*
5998 	 * Serialize hugepage allocation and instantiation, so that we don't
5999 	 * get spurious allocation failures if two CPUs race to instantiate
6000 	 * the same page in the page cache.
6001 	 */
6002 	mapping = vma->vm_file->f_mapping;
6003 	hash = hugetlb_fault_mutex_hash(mapping, vmf.pgoff);
6004 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
6005 
6006 	/*
6007 	 * Acquire vma lock before calling huge_pte_alloc and hold
6008 	 * until finished with vmf.pte.  This prevents huge_pmd_unshare from
6009 	 * being called elsewhere and making the vmf.pte no longer valid.
6010 	 */
6011 	hugetlb_vma_lock_read(vma);
6012 	vmf.pte = huge_pte_alloc(mm, vma, vmf.address, huge_page_size(h));
6013 	if (!vmf.pte) {
6014 		hugetlb_vma_unlock_read(vma);
6015 		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6016 		return VM_FAULT_OOM;
6017 	}
6018 
6019 	vmf.orig_pte = huge_ptep_get(mm, vmf.address, vmf.pte);
6020 	if (huge_pte_none(vmf.orig_pte))
6021 		/*
6022 		 * hugetlb_no_page will drop vma lock and hugetlb fault
6023 		 * mutex internally, which makes us return immediately.
6024 		 */
6025 		return hugetlb_no_page(mapping, &vmf);
6026 
6027 	if (pte_is_marker(vmf.orig_pte)) {
6028 		const pte_marker marker =
6029 			softleaf_to_marker(softleaf_from_pte(vmf.orig_pte));
6030 
6031 		if (marker & PTE_MARKER_POISONED) {
6032 			ret = VM_FAULT_HWPOISON_LARGE |
6033 				VM_FAULT_SET_HINDEX(hstate_index(h));
6034 			goto out_mutex;
6035 		} else if (WARN_ON_ONCE(marker & PTE_MARKER_GUARD)) {
6036 			/* This isn't supported in hugetlb. */
6037 			ret = VM_FAULT_SIGSEGV;
6038 			goto out_mutex;
6039 		}
6040 
6041 		return hugetlb_no_page(mapping, &vmf);
6042 	}
6043 
6044 	ret = 0;
6045 
6046 	/* Not present, either a migration or a hwpoisoned entry */
6047 	if (!pte_present(vmf.orig_pte) && !huge_pte_none(vmf.orig_pte)) {
6048 		const softleaf_t softleaf = softleaf_from_pte(vmf.orig_pte);
6049 
6050 		if (softleaf_is_migration(softleaf)) {
6051 			/*
6052 			 * Release the hugetlb fault lock now, but retain
6053 			 * the vma lock, because it is needed to guard the
6054 			 * huge_pte_lockptr() later in
6055 			 * migration_entry_wait_huge(). The vma lock will
6056 			 * be released there.
6057 			 */
6058 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6059 			migration_entry_wait_huge(vma, vmf.address, vmf.pte);
6060 			return 0;
6061 		}
6062 		if (softleaf_is_hwpoison(softleaf)) {
6063 			ret = VM_FAULT_HWPOISON_LARGE |
6064 			    VM_FAULT_SET_HINDEX(hstate_index(h));
6065 		}
6066 
6067 		goto out_mutex;
6068 	}
6069 
6070 	/*
6071 	 * If we are going to COW/unshare the mapping later, we examine the
6072 	 * pending reservations for this page now. This will ensure that any
6073 	 * allocations necessary to record that reservation occur outside the
6074 	 * spinlock.
6075 	 */
6076 	if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
6077 	    !(vma->vm_flags & VM_MAYSHARE) && !huge_pte_write(vmf.orig_pte)) {
6078 		if (vma_needs_reservation(h, vma, vmf.address) < 0) {
6079 			ret = VM_FAULT_OOM;
6080 			goto out_mutex;
6081 		}
6082 		/* Just decrements count, does not deallocate */
6083 		vma_end_reservation(h, vma, vmf.address);
6084 	}
6085 
6086 	vmf.ptl = huge_pte_lock(h, mm, vmf.pte);
6087 
6088 	/* Check for a racing update before calling hugetlb_wp() */
6089 	if (unlikely(!pte_same(vmf.orig_pte, huge_ptep_get(mm, vmf.address, vmf.pte))))
6090 		goto out_ptl;
6091 
6092 	/* Handle userfault-wp first, before trying to lock more pages */
6093 	if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(mm, vmf.address, vmf.pte)) &&
6094 	    (flags & FAULT_FLAG_WRITE) && !huge_pte_write(vmf.orig_pte)) {
6095 		if (!userfaultfd_wp_async(vma)) {
6096 			spin_unlock(vmf.ptl);
6097 			hugetlb_vma_unlock_read(vma);
6098 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6099 			return handle_userfault(&vmf, VM_UFFD_WP);
6100 		}
6101 
6102 		vmf.orig_pte = huge_pte_clear_uffd_wp(vmf.orig_pte);
6103 		set_huge_pte_at(mm, vmf.address, vmf.pte, vmf.orig_pte,
6104 				huge_page_size(hstate_vma(vma)));
6105 		/* Fallthrough to CoW */
6106 	}
6107 
6108 	if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
6109 		if (!huge_pte_write(vmf.orig_pte)) {
6110 			/*
6111 			 * Anonymous folios need to be locked since hugetlb_wp()
6112 			 * checks whether we can re-use the folio exclusively
6113 			 * for us in case we are the only user of it.
6114 			 */
6115 			folio = page_folio(pte_page(vmf.orig_pte));
6116 			if (folio_test_anon(folio) && !folio_trylock(folio)) {
6117 				need_wait_lock = true;
6118 				goto out_ptl;
6119 			}
6120 			folio_get(folio);
6121 			ret = hugetlb_wp(&vmf);
6122 			if (folio_test_anon(folio))
6123 				folio_unlock(folio);
6124 			folio_put(folio);
6125 			goto out_ptl;
6126 		} else if (likely(flags & FAULT_FLAG_WRITE)) {
6127 			vmf.orig_pte = huge_pte_mkdirty(vmf.orig_pte);
6128 		}
6129 	}
6130 	vmf.orig_pte = pte_mkyoung(vmf.orig_pte);
6131 	if (huge_ptep_set_access_flags(vma, vmf.address, vmf.pte, vmf.orig_pte,
6132 						flags & FAULT_FLAG_WRITE))
6133 		update_mmu_cache(vma, vmf.address, vmf.pte);
6134 out_ptl:
6135 	spin_unlock(vmf.ptl);
6136 out_mutex:
6137 	hugetlb_vma_unlock_read(vma);
6138 
6139 	/*
6140 	 * We must check to release the per-VMA lock. __vmf_anon_prepare() in
6141 	 * hugetlb_wp() is the only way ret can be set to VM_FAULT_RETRY.
6142 	 */
6143 	if (unlikely(ret & VM_FAULT_RETRY))
6144 		vma_end_read(vma);
6145 
6146 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6147 	/*
6148 	 * hugetlb_wp drops all the locks except the folio lock before trying to
6149 	 * unmap the folio from other processes. During that window, if another
6150 	 * process mapping that folio faults in, it will take the mutex and then
6151 	 * wait on folio_lock, causing an ABBA deadlock.
6152 	 * Use trylock instead and bail out if we fail.
6153 	 *
6154 	 * Ideally, we would hold a refcount on the folio we wait for, but we do
6155 	 * not want to use the folio after it becomes unlocked; we only want to
6156 	 * wait for it to become unlocked, so hopefully the next fault succeeds
6157 	 * on the trylock.
6158 	 */
6159 	if (need_wait_lock)
6160 		folio_wait_locked(folio);
6161 	return ret;
6162 }
6163 
6164 #ifdef CONFIG_USERFAULTFD
6165 /*
6166  * Can probably be eliminated, but still used by hugetlb_mfill_atomic_pte().
6167  */
6168 static struct folio *alloc_hugetlb_folio_vma(struct hstate *h,
6169 		struct vm_area_struct *vma, unsigned long address)
6170 {
6171 	struct mempolicy *mpol;
6172 	nodemask_t *nodemask;
6173 	struct folio *folio;
6174 	gfp_t gfp_mask;
6175 	int node;
6176 
6177 	gfp_mask = htlb_alloc_mask(h);
6178 	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
6179 	/*
6180 	 * This is used to allocate a temporary hugetlb to hold the copied
6181 	 * content, which will then be copied again to the final hugetlb
6182 	 * consuming a reservation. Set the alloc_fallback to false to indicate
6183 	 * that breaking the per-node hugetlb pool is not allowed in this case.
6184 	 */
6185 	folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask, false);
6186 	mpol_cond_put(mpol);
6187 
6188 	return folio;
6189 }
6190 
6191 /*
6192  * Used by userfaultfd UFFDIO_* ioctls. Based on userfaultfd's mfill_atomic_pte
6193  * with modifications for hugetlb pages.
6194  */
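/*
 * Rough shape of the paths below (hedged summary, not authoritative):
 *   MFILL_ATOMIC_POISON   - install a PTE_MARKER_POISONED entry and return;
 *   MFILL_ATOMIC_CONTINUE - look up the existing folio in the page cache;
 *   copy path (UFFDIO_COPY) - allocate a hugetlb folio and copy from
 *                             src_addr, falling back to a temporary folio
 *                             (*foliop) when the copy must retry without
 *                             mmap_lock.
 * All paths then install the PTE under the huge PTE lock.
 */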
6195 int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
6196 			     struct vm_area_struct *dst_vma,
6197 			     unsigned long dst_addr,
6198 			     unsigned long src_addr,
6199 			     uffd_flags_t flags,
6200 			     struct folio **foliop)
6201 {
6202 	struct mm_struct *dst_mm = dst_vma->vm_mm;
6203 	bool is_continue = uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE);
6204 	bool wp_enabled = (flags & MFILL_ATOMIC_WP);
6205 	struct hstate *h = hstate_vma(dst_vma);
6206 	struct address_space *mapping = dst_vma->vm_file->f_mapping;
6207 	pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
6208 	unsigned long size = huge_page_size(h);
6209 	int vm_shared = dst_vma->vm_flags & VM_SHARED;
6210 	pte_t _dst_pte;
6211 	spinlock_t *ptl;
6212 	int ret = -ENOMEM;
6213 	struct folio *folio;
6214 	bool folio_in_pagecache = false;
6215 	pte_t dst_ptep;
6216 
6217 	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
6218 		ptl = huge_pte_lock(h, dst_mm, dst_pte);
6219 
6220 		/* Don't overwrite any existing PTEs (even markers) */
6221 		if (!huge_pte_none(huge_ptep_get(dst_mm, dst_addr, dst_pte))) {
6222 			spin_unlock(ptl);
6223 			return -EEXIST;
6224 		}
6225 
6226 		_dst_pte = make_pte_marker(PTE_MARKER_POISONED);
6227 		set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, size);
6228 
6229 		/* No need to invalidate - it was non-present before */
6230 		update_mmu_cache(dst_vma, dst_addr, dst_pte);
6231 
6232 		spin_unlock(ptl);
6233 		return 0;
6234 	}
6235 
6236 	if (is_continue) {
6237 		ret = -EFAULT;
6238 		folio = filemap_lock_hugetlb_folio(h, mapping, idx);
6239 		if (IS_ERR(folio))
6240 			goto out;
6241 		folio_in_pagecache = true;
6242 	} else if (!*foliop) {
6243 		/* If a folio already exists, then it's UFFDIO_COPY for
6244 		 * a non-missing case. Return -EEXIST.
6245 		 */
6246 		if (vm_shared &&
6247 		    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
6248 			ret = -EEXIST;
6249 			goto out;
6250 		}
6251 
6252 		folio = alloc_hugetlb_folio(dst_vma, dst_addr, false);
6253 		if (IS_ERR(folio)) {
6254 			pte_t *actual_pte = hugetlb_walk(dst_vma, dst_addr, PMD_SIZE);
6255 			if (actual_pte) {
6256 				ret = -EEXIST;
6257 				goto out;
6258 			}
6259 			ret = -ENOMEM;
6260 			goto out;
6261 		}
6262 
6263 		ret = copy_folio_from_user(folio, (const void __user *) src_addr,
6264 					   false);
6265 
6266 		/* fallback to copy_from_user outside mmap_lock */
6267 		if (unlikely(ret)) {
6268 			ret = -ENOENT;
6269 			/* Free the allocated folio which may have
6270 			 * consumed a reservation.
6271 			 */
6272 			restore_reserve_on_error(h, dst_vma, dst_addr, folio);
6273 			folio_put(folio);
6274 
6275 			/* Allocate a temporary folio to hold the copied
6276 			 * contents.
6277 			 */
6278 			folio = alloc_hugetlb_folio_vma(h, dst_vma, dst_addr);
6279 			if (!folio) {
6280 				ret = -ENOMEM;
6281 				goto out;
6282 			}
6283 			*foliop = folio;
6284 			/* Set the outparam foliop and return to the caller to
6285 			 * copy the contents outside the lock. Don't free the
6286 			 * folio.
6287 			 */
6288 			goto out;
6289 		}
6290 	} else {
6291 		if (vm_shared &&
6292 		    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
6293 			folio_put(*foliop);
6294 			ret = -EEXIST;
6295 			*foliop = NULL;
6296 			goto out;
6297 		}
6298 
6299 		folio = alloc_hugetlb_folio(dst_vma, dst_addr, false);
6300 		if (IS_ERR(folio)) {
6301 			folio_put(*foliop);
6302 			ret = -ENOMEM;
6303 			*foliop = NULL;
6304 			goto out;
6305 		}
6306 		ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
6307 		folio_put(*foliop);
6308 		*foliop = NULL;
6309 		if (ret) {
6310 			folio_put(folio);
6311 			goto out;
6312 		}
6313 	}
6314 
6315 	/*
6316 	 * If we just allocated a new page, we need a memory barrier to ensure
6317 	 * that preceding stores to the page become visible before the
6318 	 * set_pte_at() write. The memory barrier inside __folio_mark_uptodate
6319 	 * is what we need.
6320 	 *
6321 	 * In the case where we have not allocated a new page (is_continue),
6322 	 * the page must already be uptodate. UFFDIO_CONTINUE already includes
6323 	 * an earlier smp_wmb() to ensure that prior stores will be visible
6324 	 * before the set_pte_at() write.
6325 	 */
6326 	if (!is_continue)
6327 		__folio_mark_uptodate(folio);
6328 	else
6329 		WARN_ON_ONCE(!folio_test_uptodate(folio));
6330 
6331 	/* Add shared, newly allocated pages to the page cache. */
6332 	if (vm_shared && !is_continue) {
6333 		ret = -EFAULT;
6334 		if (idx >= (i_size_read(mapping->host) >> huge_page_shift(h)))
6335 			goto out_release_nounlock;
6336 
6337 		/*
6338 		 * Serialization between remove_inode_hugepages() and
6339 		 * hugetlb_add_to_page_cache() below happens through the
6340 		 * hugetlb_fault_mutex_table that here must be held by
6341 		 * the caller.
6342 		 */
6343 		ret = hugetlb_add_to_page_cache(folio, mapping, idx);
6344 		if (ret)
6345 			goto out_release_nounlock;
6346 		folio_in_pagecache = true;
6347 	}
6348 
6349 	ptl = huge_pte_lock(h, dst_mm, dst_pte);
6350 
6351 	ret = -EIO;
6352 	if (folio_test_hwpoison(folio))
6353 		goto out_release_unlock;
6354 
6355 	ret = -EEXIST;
6356 
6357 	dst_ptep = huge_ptep_get(dst_mm, dst_addr, dst_pte);
6358 	/*
6359 	 * See comment about UFFD marker overwriting in
6360 	 * mfill_atomic_install_pte().
6361 	 */
6362 	if (!huge_pte_none(dst_ptep) && !pte_is_uffd_marker(dst_ptep))
6363 		goto out_release_unlock;
6364 
6365 	if (folio_in_pagecache)
6366 		hugetlb_add_file_rmap(folio);
6367 	else
6368 		hugetlb_add_new_anon_rmap(folio, dst_vma, dst_addr);
6369 
6370 	/*
6371 	 * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY
6372 	 * with wp flag set, don't set pte write bit.
6373 	 */
6374 	_dst_pte = make_huge_pte(dst_vma, folio,
6375 				 !wp_enabled && !(is_continue && !vm_shared));
6376 	/*
6377 	 * Always mark UFFDIO_COPY page dirty; note that this may not be
6378 	 * extremely important for hugetlbfs for now since swapping is not
6379 	 * supported, but we should still be clear in that this page cannot be
6380 	 * thrown away at will, even if write bit not set.
6381 	 */
6382 	_dst_pte = huge_pte_mkdirty(_dst_pte);
6383 	_dst_pte = pte_mkyoung(_dst_pte);
6384 
6385 	if (wp_enabled)
6386 		_dst_pte = huge_pte_mkuffd_wp(_dst_pte);
6387 
6388 	set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, size);
6389 
6390 	hugetlb_count_add(pages_per_huge_page(h), dst_mm);
6391 
6392 	/* No need to invalidate - it was non-present before */
6393 	update_mmu_cache(dst_vma, dst_addr, dst_pte);
6394 
6395 	spin_unlock(ptl);
6396 	if (!is_continue)
6397 		folio_set_hugetlb_migratable(folio);
6398 	if (vm_shared || is_continue)
6399 		folio_unlock(folio);
6400 	ret = 0;
6401 out:
6402 	return ret;
6403 out_release_unlock:
6404 	spin_unlock(ptl);
6405 	if (vm_shared || is_continue)
6406 		folio_unlock(folio);
6407 out_release_nounlock:
6408 	if (!folio_in_pagecache)
6409 		restore_reserve_on_error(h, dst_vma, dst_addr, folio);
6410 	folio_put(folio);
6411 	goto out;
6412 }
6413 #endif /* CONFIG_USERFAULTFD */
6414 
6415 long hugetlb_change_protection(struct vm_area_struct *vma,
6416 		unsigned long address, unsigned long end,
6417 		pgprot_t newprot, unsigned long cp_flags)
6418 {
6419 	struct mm_struct *mm = vma->vm_mm;
6420 	unsigned long start = address;
6421 	pte_t *ptep;
6422 	pte_t pte;
6423 	struct hstate *h = hstate_vma(vma);
6424 	long pages = 0, psize = huge_page_size(h);
6425 	struct mmu_notifier_range range;
6426 	unsigned long last_addr_mask;
6427 	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
6428 	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
6429 	struct mmu_gather tlb;
6430 
6431 	/*
6432 	 * In the case of shared PMDs, the area to flush could be beyond
6433 	 * start/end.  Set range.start/range.end to cover the maximum possible
6434 	 * range if PMD sharing is possible.
6435 	 */
6436 	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
6437 				0, mm, start, end);
6438 	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
6439 
6440 	BUG_ON(address >= end);
6441 	flush_cache_range(vma, range.start, range.end);
6442 	tlb_gather_mmu_vma(&tlb, vma);
6443 
6444 	mmu_notifier_invalidate_range_start(&range);
6445 	hugetlb_vma_lock_write(vma);
6446 	i_mmap_lock_write(vma->vm_file->f_mapping);
6447 	last_addr_mask = hugetlb_mask_last_page(h);
6448 	for (; address < end; address += psize) {
6449 		softleaf_t entry;
6450 		spinlock_t *ptl;
6451 
6452 		ptep = hugetlb_walk(vma, address, psize);
6453 		if (!ptep) {
6454 			if (!uffd_wp) {
6455 				address |= last_addr_mask;
6456 				continue;
6457 			}
6458 			/*
6459 			 * Userfaultfd wr-protect requires pgtable
6460 			 * pre-allocations to install pte markers.
6461 			 */
6462 			ptep = huge_pte_alloc(mm, vma, address, psize);
6463 			if (!ptep) {
6464 				pages = -ENOMEM;
6465 				break;
6466 			}
6467 		}
6468 		ptl = huge_pte_lock(h, mm, ptep);
6469 		if (huge_pmd_unshare(&tlb, vma, address, ptep)) {
6470 			/*
6471 			 * When uffd-wp is enabled on the vma, unshare
6472 			 * shouldn't happen at all.  Warn about it if it
6473 			 * happens for some reason.
6474 			 */
6475 			WARN_ON_ONCE(uffd_wp || uffd_wp_resolve);
6476 			pages++;
6477 			spin_unlock(ptl);
6478 			address |= last_addr_mask;
6479 			continue;
6480 		}
6481 		pte = huge_ptep_get(mm, address, ptep);
6482 		if (huge_pte_none(pte)) {
6483 			if (unlikely(uffd_wp))
6484 				/* Safe to modify directly (none->non-present). */
6485 				set_huge_pte_at(mm, address, ptep,
6486 						make_pte_marker(PTE_MARKER_UFFD_WP),
6487 						psize);
6488 			goto next;
6489 		}
6490 
6491 		entry = softleaf_from_pte(pte);
6492 		if (unlikely(softleaf_is_hwpoison(entry))) {
6493 			/* Nothing to do. */
6494 		} else if (unlikely(softleaf_is_migration(entry))) {
6495 			struct folio *folio = softleaf_to_folio(entry);
6496 			pte_t newpte = pte;
6497 
6498 			if (softleaf_is_migration_write(entry)) {
6499 				if (folio_test_anon(folio))
6500 					entry = make_readable_exclusive_migration_entry(
6501 								swp_offset(entry));
6502 				else
6503 					entry = make_readable_migration_entry(
6504 								swp_offset(entry));
6505 				newpte = swp_entry_to_pte(entry);
6506 				pages++;
6507 			}
6508 
6509 			if (uffd_wp)
6510 				newpte = pte_swp_mkuffd_wp(newpte);
6511 			else if (uffd_wp_resolve)
6512 				newpte = pte_swp_clear_uffd_wp(newpte);
6513 			if (!pte_same(pte, newpte))
6514 				set_huge_pte_at(mm, address, ptep, newpte, psize);
6515 		} else if (unlikely(pte_is_marker(pte))) {
6516 			/*
6517 			 * Do nothing on a poison marker; page is
6518 			 * corrupted, permissions do not apply. Here
6519 			 * pte_marker_uffd_wp()==true implies !poison
6520 			 * because they're mutual exclusive.
6521 			 * because they're mutually exclusive.
6522 			if (pte_is_uffd_wp_marker(pte) && uffd_wp_resolve)
6523 				/* Safe to modify directly (non-present->none). */
6524 				huge_pte_clear(mm, address, ptep, psize);
6525 		} else {
6526 			pte_t old_pte;
6527 			unsigned int shift = huge_page_shift(hstate_vma(vma));
6528 
6529 			old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
6530 			pte = huge_pte_modify(old_pte, newprot);
6531 			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
6532 			if (uffd_wp)
6533 				pte = huge_pte_mkuffd_wp(pte);
6534 			else if (uffd_wp_resolve)
6535 				pte = huge_pte_clear_uffd_wp(pte);
6536 			huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
6537 			pages++;
6538 			tlb_remove_huge_tlb_entry(h, &tlb, ptep, address);
6539 		}
6540 
6541 next:
6542 		spin_unlock(ptl);
6543 		cond_resched();
6544 	}
6545 
6546 	tlb_flush_mmu_tlbonly(&tlb);
6547 	huge_pmd_unshare_flush(&tlb, vma);
6548 	/*
6549 	 * No need to call mmu_notifier_arch_invalidate_secondary_tlbs() as we are
6550 	 * downgrading page table protection not changing it to point to a new
6551 	 * page.
6552 	 *
6553 	 * See Documentation/mm/mmu_notifier.rst
6554 	 */
6555 	i_mmap_unlock_write(vma->vm_file->f_mapping);
6556 	hugetlb_vma_unlock_write(vma);
6557 	mmu_notifier_invalidate_range_end(&range);
6558 	tlb_finish_mmu(&tlb);
6559 
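	/*
	 * Hedged note: "pages" counts huge pages changed (or a negative
	 * errno); shift by the hstate order so the caller presumably sees
	 * the result in base-page units, like the normal change_protection()
	 * path.
	 */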
6560 	return pages > 0 ? (pages << h->order) : pages;
6561 }
6562 
6563 /*
6564  * Update the reservation map for the range [from, to].
6565  *
6566  * Returns the number of entries that would be added to the reservation map
6567  * associated with the range [from, to].  This number is greater or equal to
6568  * associated with the range [from, to].  This number is greater than or equal
6569  */
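/*
 * Worked example (hedged, for illustration only): reserving [0, 10) on a
 * shared mapping whose resv_map already contains [2, 5) makes region_chg()
 * return chg = 7. If a racing alloc_hugetlb_folio() adds further entries
 * before region_add() runs, add ends up smaller than chg and the excess
 * subpool/global reservation is handed back below (see the chg > add case).
 */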
6570 
6571 long hugetlb_reserve_pages(struct inode *inode,
6572 		long from, long to,
6573 		struct vm_area_desc *desc,
6574 		vm_flags_t vm_flags)
6575 {
6576 	long chg = -1, add = -1, spool_resv, gbl_resv;
6577 	struct hstate *h = hstate_inode(inode);
6578 	struct hugepage_subpool *spool = subpool_inode(inode);
6579 	struct resv_map *resv_map;
6580 	struct hugetlb_cgroup *h_cg = NULL;
6581 	long gbl_reserve, regions_needed = 0;
6582 	int err;
6583 
6584 	/* This should never happen */
6585 	if (from > to) {
6586 		VM_WARN(1, "%s called with a negative range\n", __func__);
6587 		return -EINVAL;
6588 	}
6589 
6590 	/*
6591 	 * Only apply hugepage reservation if asked. At fault time, an
6592 	 * attempt will be made for VM_NORESERVE to allocate a page
6593 	 * without using reserves
6594 	 */
6595 	if (vm_flags & VM_NORESERVE)
6596 		return 0;
6597 
6598 	/*
6599 	 * Shared mappings base their reservation on the number of pages that
6600 	 * are already allocated on behalf of the file. Private mappings need
6601 	 * to reserve the full area even if read-only as mprotect() may be
6602 	 * called to make the mapping read-write. Assume !desc is a shm mapping
6603 	 */
6604 	if (!desc || desc->vm_flags & VM_MAYSHARE) {
6605 		/*
6606 		 * resv_map can not be NULL as hugetlb_reserve_pages is only
6607 		 * called for inodes for which resv_maps were created (see
6608 		 * hugetlbfs_get_inode).
6609 		 */
6610 		resv_map = inode_resv_map(inode);
6611 
6612 		chg = region_chg(resv_map, from, to, &regions_needed);
6613 	} else {
6614 		/* Private mapping. */
6615 		resv_map = resv_map_alloc();
6616 		if (!resv_map) {
6617 			err = -ENOMEM;
6618 			goto out_err;
6619 		}
6620 
6621 		chg = to - from;
6622 
6623 		set_vma_desc_resv_map(desc, resv_map);
6624 		set_vma_desc_resv_flags(desc, HPAGE_RESV_OWNER);
6625 	}
6626 
6627 	if (chg < 0) {
6628 		/* region_chg() above can return -ENOMEM */
6629 		err = (chg == -ENOMEM) ? -ENOMEM : -EINVAL;
6630 		goto out_err;
6631 	}
6632 
6633 	err = hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
6634 				chg * pages_per_huge_page(h), &h_cg);
6635 	if (err < 0)
6636 		goto out_err;
6637 
6638 	if (desc && !(desc->vm_flags & VM_MAYSHARE) && h_cg) {
6639 		/* For private mappings, the hugetlb_cgroup uncharge info hangs
6640 		 * of the resv_map.
6641 		 * off the resv_map.
6642 		resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h);
6643 	}
6644 
6645 	/*
6646 	 * There must be enough pages in the subpool for the mapping. If
6647 	 * the subpool has a minimum size, there may be some global
6648 	 * reservations already in place (gbl_reserve).
6649 	 */
6650 	gbl_reserve = hugepage_subpool_get_pages(spool, chg);
6651 	if (gbl_reserve < 0) {
6652 		err = gbl_reserve;
6653 		goto out_uncharge_cgroup;
6654 	}
6655 
6656 	/*
6657 	 * Check enough hugepages are available for the reservation.
6658 	 * Hand the pages back to the subpool if there are not enough.
6659 	 */
6660 	err = hugetlb_acct_memory(h, gbl_reserve);
6661 	if (err < 0)
6662 		goto out_put_pages;
6663 
6664 	/*
6665 	 * Account for the reservations made. Shared mappings record regions
6666 	 * that have reservations as they are shared by multiple VMAs.
6667 	 * When the last VMA disappears, the region map says how much
6668 	 * the reservation was and the page cache tells how much of
6669 	 * the reservation was consumed. Private mappings are per-VMA and
6670 	 * only the consumed reservations are tracked. When the VMA
6671 	 * disappears, the original reservation is the VMA size and the
6672 	 * consumed reservations are stored in the map. Hence, nothing
6673 	 * else has to be done for private mappings here
6674 	 */
6675 	if (!desc || desc->vm_flags & VM_MAYSHARE) {
6676 		add = region_add(resv_map, from, to, regions_needed, h, h_cg);
6677 
6678 		if (unlikely(add < 0)) {
6679 			hugetlb_acct_memory(h, -gbl_reserve);
6680 			err = add;
6681 			goto out_put_pages;
6682 		} else if (unlikely(chg > add)) {
6683 			/*
6684 			 * pages in this range were added to the reserve
6685 			 * map between region_chg and region_add.  This
6686 			 * indicates a race with alloc_hugetlb_folio.  Adjust
6687 			 * the subpool and reserve counts modified above
6688 			 * based on the difference.
6689 			 */
6690 			long rsv_adjust;
6691 
6692 			/*
6693 			 * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the
6694 			 * reference to h_cg->css. See comment below for detail.
6695 			 */
6696 			hugetlb_cgroup_uncharge_cgroup_rsvd(
6697 				hstate_index(h),
6698 				(chg - add) * pages_per_huge_page(h), h_cg);
6699 
6700 			rsv_adjust = hugepage_subpool_put_pages(spool,
6701 								chg - add);
6702 			hugetlb_acct_memory(h, -rsv_adjust);
6703 		} else if (h_cg) {
6704 			/*
6705 			 * The file_regions will hold their own reference to
6706 			 * h_cg->css. So we should release the reference held
6707 			 * via hugetlb_cgroup_charge_cgroup_rsvd() when we are
6708 			 * done.
6709 			 */
6710 			hugetlb_cgroup_put_rsvd_cgroup(h_cg);
6711 		}
6712 	}
6713 	return chg;
6714 
6715 out_put_pages:
6716 	spool_resv = chg - gbl_reserve;
6717 	if (spool_resv) {
6718 		/* put sub pool's reservation back, chg - gbl_reserve */
6719 		gbl_resv = hugepage_subpool_put_pages(spool, spool_resv);
6720 		/*
6721 		 * The subpool's reserved pages could not be put back due to a
6722 		 * race, so return them to the hstate.
6723 		 */
6724 		hugetlb_acct_memory(h, -gbl_resv);
6725 	}
6726 	/* Restore used_hpages for pages that failed global reservation */
6727 	if (gbl_reserve && spool) {
6728 		unsigned long flags;
6729 
6730 		spin_lock_irqsave(&spool->lock, flags);
6731 		if (spool->max_hpages != -1)
6732 			spool->used_hpages -= gbl_reserve;
6733 		unlock_or_release_subpool(spool, flags);
6734 	}
6735 out_uncharge_cgroup:
6736 	hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
6737 					    chg * pages_per_huge_page(h), h_cg);
6738 out_err:
6739 	if (!desc || desc->vm_flags & VM_MAYSHARE)
6740 		/* Only call region_abort if the region_chg succeeded but the
6741 		 * region_add failed or didn't run.
6742 		 */
6743 		if (chg >= 0 && add < 0)
6744 			region_abort(resv_map, from, to, regions_needed);
6745 	if (desc && is_vma_desc_resv_set(desc, HPAGE_RESV_OWNER)) {
6746 		kref_put(&resv_map->refs, resv_map_release);
6747 		set_vma_desc_resv_map(desc, NULL);
6748 	}
6749 	return err;
6750 }
6751 
6752 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
6753 								long freed)
6754 {
6755 	struct hstate *h = hstate_inode(inode);
6756 	struct resv_map *resv_map = inode_resv_map(inode);
6757 	long chg = 0;
6758 	struct hugepage_subpool *spool = subpool_inode(inode);
6759 	long gbl_reserve;
6760 
6761 	/*
6762 	 * Since this routine can be called in the evict inode path for all
6763 	 * hugetlbfs inodes, resv_map could be NULL.
6764 	 */
6765 	if (resv_map) {
6766 		chg = region_del(resv_map, start, end);
6767 		/*
6768 		 * region_del() can fail in the rare case where a region
6769 	 * must be split and another region descriptor cannot be
6770 		 * allocated.  If end == LONG_MAX, it will not fail.
6771 		 */
6772 		if (chg < 0)
6773 			return chg;
6774 	}
6775 
6776 	spin_lock(&inode->i_lock);
6777 	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
6778 	spin_unlock(&inode->i_lock);
6779 
6780 	/*
6781 	 * If the subpool has a minimum size, the number of global
6782 	 * reservations to be released may be adjusted.
6783 	 *
6784 	 * Note that !resv_map implies freed == 0. So (chg - freed)
6785 	 * won't go negative.
6786 	 */
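	/*
	 * Worked example (hypothetical numbers): if region_del() removed
	 * chg = 3 reservations but only freed = 1 page was actually freed,
	 * 2 reservations are handed back to the subpool. A subpool that is
	 * still below its minimum size may retain some or all of them, so
	 * hugepage_subpool_put_pages() can return a gbl_reserve smaller
	 * than 2, and only that many reservations are released from the
	 * global pool below.
	 */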
6787 	gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
6788 	hugetlb_acct_memory(h, -gbl_reserve);
6789 
6790 	return 0;
6791 }
6792 
6793 #ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
6794 static unsigned long page_table_shareable(struct vm_area_struct *svma,
6795 				struct vm_area_struct *vma,
6796 				unsigned long addr, pgoff_t idx)
6797 {
6798 	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
6799 				svma->vm_start;
6800 	unsigned long sbase = saddr & PUD_MASK;
6801 	unsigned long s_end = sbase + PUD_SIZE;
6802 
6803 	/* Allow segments to share even if only one is marked locked */
6804 	vm_flags_t vm_flags = vma->vm_flags & ~VM_LOCKED_MASK;
6805 	vm_flags_t svm_flags = svma->vm_flags & ~VM_LOCKED_MASK;
6806 
6807 	/*
6808 	 * Match the virtual addresses, permissions and the alignment of the
6809 	 * page table page.
6810 	 *
6811 	 * Also, vma_lock (vm_private_data) is required for sharing.
6812 	 */
6813 	if (pmd_index(addr) != pmd_index(saddr) ||
6814 	    vm_flags != svm_flags ||
6815 	    !range_in_vma(svma, sbase, s_end) ||
6816 	    !svma->vm_private_data)
6817 		return 0;
6818 
6819 	return saddr;
6820 }
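
/*
 * Illustrative sketch (hypothetical addresses, assuming 2MB PMDs within 1GB
 * PUD regions): if @vma maps file offset 0 at 0x40000000 and @svma maps the
 * same offset at 0x80000000, then for addr = 0x40200000 the computed saddr
 * is 0x80200000. Both addresses sit at the same offset within their
 * PUD-aligned region, so pmd_index(addr) == pmd_index(saddr); provided the
 * flags match, svma spans the whole PUD range and it has a vma_lock, the PMD
 * table is shareable.
 */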
6821 
6822 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
6823 {
6824 	unsigned long start = addr & PUD_MASK;
6825 	unsigned long end = start + PUD_SIZE;
6826 
6827 #ifdef CONFIG_USERFAULTFD
6828 	if (uffd_disable_huge_pmd_share(vma))
6829 		return false;
6830 #endif
6831 	/*
6832 	 * Check for proper vm_flags and page table alignment.
6833 	 */
6834 	if (!(vma->vm_flags & VM_MAYSHARE))
6835 		return false;
6836 	if (!vma->vm_private_data)	/* vma lock required for sharing */
6837 		return false;
6838 	if (!range_in_vma(vma, start, end))
6839 		return false;
6840 	return true;
6841 }
6842 
6843 /*
6844  * Determine if start,end range within vma could be mapped by shared pmd.
6845  * If yes, adjust start and end to cover range associated with possible
6846  * shared pmd mappings.
6847  */
6848 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
6849 				unsigned long *start, unsigned long *end)
6850 {
6851 	unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
6852 		v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
6853 
6854 	/*
6855 	 * vma needs to span at least one aligned PUD size, and the range
6856 	 * must be at least partially within it.
6857 	 */
6858 	if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
6859 		(*end <= v_start) || (*start >= v_end))
6860 		return;
6861 
6862 	/* Extend the range to be PUD aligned for a worst case scenario */
6863 	if (*start > v_start)
6864 		*start = ALIGN_DOWN(*start, PUD_SIZE);
6865 
6866 	if (*end < v_end)
6867 		*end = ALIGN(*end, PUD_SIZE);
6868 }
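
/*
 * Usage sketch (illustrative, with hypothetical addresses and 1GB PUDs):
 * callers about to unmap or invalidate a range widen it first so that any
 * shared PMD tables touching the range are fully covered:
 *
 *	unsigned long start = ..., end = ...;
 *
 *	adjust_range_if_pmd_sharing_possible(vma, &start, &end);
 *
 * For example, [0x40200000, 0x40400000) inside a sufficiently large,
 * PUD-aligned VM_MAYSHARE vma would be widened to [0x40000000, 0x80000000).
 */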
6869 
6870 /*
6871  * Search for a shareable pmd page for hugetlb. In any case this calls pmd_alloc()
6872  * and returns the corresponding pte. While this is not necessary for the
6873  * !shared pmd case because we can allocate the pmd later as well, it makes the
6874  * code much cleaner. pmd allocation is essential for the shared case because
6875  * pud has to be populated inside the same i_mmap_rwsem section - otherwise
6876  * racing tasks could either miss the sharing (see huge_pte_offset) or select a
6877  * bad pmd for sharing.
6878  */
6879 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
6880 		      unsigned long addr, pud_t *pud)
6881 {
6882 	struct address_space *mapping = vma->vm_file->f_mapping;
6883 	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
6884 			vma->vm_pgoff;
6885 	struct vm_area_struct *svma;
6886 	unsigned long saddr;
6887 	pte_t *spte = NULL;
6888 	pte_t *pte;
6889 
6890 	i_mmap_lock_read(mapping);
6891 	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
6892 		if (svma == vma)
6893 			continue;
6894 
6895 		saddr = page_table_shareable(svma, vma, addr, idx);
6896 		if (saddr) {
6897 			spte = hugetlb_walk(svma, saddr,
6898 					    vma_mmu_pagesize(svma));
6899 			if (spte) {
6900 				ptdesc_pmd_pts_inc(virt_to_ptdesc(spte));
6901 				break;
6902 			}
6903 		}
6904 	}
6905 
6906 	if (!spte)
6907 		goto out;
6908 
6909 	spin_lock(&mm->page_table_lock);
6910 	if (pud_none(*pud)) {
6911 		pud_populate(mm, pud,
6912 				(pmd_t *)((unsigned long)spte & PAGE_MASK));
6913 		mm_inc_nr_pmds(mm);
6914 	} else {
6915 		ptdesc_pmd_pts_dec(virt_to_ptdesc(spte));
6916 	}
6917 	spin_unlock(&mm->page_table_lock);
6918 out:
6919 	pte = (pte_t *)pmd_alloc(mm, pud, addr);
6920 	i_mmap_unlock_read(mapping);
6921 	return pte;
6922 }
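
/*
 * Typical call site (mirroring huge_pte_alloc() later in this file): sharing
 * is only attempted when the vma allows it and the PUD entry is still empty;
 * otherwise a private PMD table is allocated:
 *
 *	if (want_pmd_share(vma, addr) && pud_none(*pud))
 *		pte = huge_pmd_share(mm, vma, addr, pud);
 *	else
 *		pte = (pte_t *)pmd_alloc(mm, pud, addr);
 */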
6923 
6924 /**
6925  * huge_pmd_unshare - Unmap a pmd table if it is shared by multiple users
6926  * @tlb: the current mmu_gather.
6927  * @vma: the vma covering the pmd table.
6928  * @addr: the address we are trying to unshare.
6929  * @ptep: pointer into the (pmd) page table.
6930  *
6931  * Called with the page table lock held, the i_mmap_rwsem held in write mode
6932  * and the hugetlb vma lock held in write mode.
6933  *
6934  * Note: The caller must call huge_pmd_unshare_flush() before dropping the
6935  * i_mmap_rwsem.
6936  *
6937  * Returns: 1 if it was a shared PMD table and it got unmapped, or 0 if it
6938  *	    was not a shared PMD table.
6939  */
6940 int huge_pmd_unshare(struct mmu_gather *tlb, struct vm_area_struct *vma,
6941 		unsigned long addr, pte_t *ptep)
6942 {
6943 	unsigned long sz = huge_page_size(hstate_vma(vma));
6944 	struct mm_struct *mm = vma->vm_mm;
6945 	pgd_t *pgd = pgd_offset(mm, addr);
6946 	p4d_t *p4d = p4d_offset(pgd, addr);
6947 	pud_t *pud = pud_offset(p4d, addr);
6948 
6949 	if (sz != PMD_SIZE)
6950 		return 0;
6951 	if (!ptdesc_pmd_is_shared(virt_to_ptdesc(ptep)))
6952 		return 0;
6953 	i_mmap_assert_write_locked(vma->vm_file->f_mapping);
6954 	hugetlb_vma_assert_locked(vma);
6955 	pud_clear(pud);
6956 
6957 	tlb_unshare_pmd_ptdesc(tlb, virt_to_ptdesc(ptep), addr);
6958 
6959 	mm_dec_nr_pmds(mm);
6960 	return 1;
6961 }
6962 
6963 /*
6964  * huge_pmd_unshare_flush - Complete a sequence of huge_pmd_unshare() calls
6965  * @tlb: the current mmu_gather.
6966  * @vma: the vma covering the pmd table.
6967  *
6968  * Perform necessary TLB flushes or IPI broadcasts to synchronize PMD table
6969  * unsharing with concurrent page table walkers.
6970  *
6971  * This function must be called after a sequence of huge_pmd_unshare()
6972  * calls while still holding the i_mmap_rwsem.
6973  */
6974 void huge_pmd_unshare_flush(struct mmu_gather *tlb, struct vm_area_struct *vma)
6975 {
6976 	/*
6977 	 * We must synchronize page table unsharing such that nobody will
6978 	 * try reusing a previously-shared page table while it might still
6979 	 * be in use by previous sharers (TLB, GUP_fast).
6980 	 */
6981 	i_mmap_assert_write_locked(vma->vm_file->f_mapping);
6982 
6983 	tlb_flush_unshared_tables(tlb);
6984 }
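
/*
 * Sketch of the expected sequence (mirroring hugetlb_unshare_pmds() later in
 * this file): unshare under the hugetlb vma lock, the i_mmap_rwsem held for
 * write and the PTE lock, then flush once before i_mmap_rwsem is dropped:
 *
 *	i_mmap_lock_write(vma->vm_file->f_mapping);
 *	for (address = start; address < end; address += PUD_SIZE) {
 *		ptep = hugetlb_walk(vma, address, sz);
 *		if (!ptep)
 *			continue;
 *		ptl = huge_pte_lock(h, mm, ptep);
 *		huge_pmd_unshare(&tlb, vma, address, ptep);
 *		spin_unlock(ptl);
 *	}
 *	huge_pmd_unshare_flush(&tlb, vma);
 *	i_mmap_unlock_write(vma->vm_file->f_mapping);
 */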
6985 
6986 #else /* !CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */
6987 
6988 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
6989 		      unsigned long addr, pud_t *pud)
6990 {
6991 	return NULL;
6992 }
6993 
6994 int huge_pmd_unshare(struct mmu_gather *tlb, struct vm_area_struct *vma,
6995 		unsigned long addr, pte_t *ptep)
6996 {
6997 	return 0;
6998 }
6999 
7000 void huge_pmd_unshare_flush(struct mmu_gather *tlb, struct vm_area_struct *vma)
7001 {
7002 }
7003 
7004 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
7005 				unsigned long *start, unsigned long *end)
7006 {
7007 }
7008 
7009 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
7010 {
7011 	return false;
7012 }
7013 #endif /* CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */
7014 
7015 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
7016 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
7017 			unsigned long addr, unsigned long sz)
7018 {
7019 	pgd_t *pgd;
7020 	p4d_t *p4d;
7021 	pud_t *pud;
7022 	pte_t *pte = NULL;
7023 
7024 	pgd = pgd_offset(mm, addr);
7025 	p4d = p4d_alloc(mm, pgd, addr);
7026 	if (!p4d)
7027 		return NULL;
7028 	pud = pud_alloc(mm, p4d, addr);
7029 	if (pud) {
7030 		if (sz == PUD_SIZE) {
7031 			pte = (pte_t *)pud;
7032 		} else {
7033 			BUG_ON(sz != PMD_SIZE);
7034 			if (want_pmd_share(vma, addr) && pud_none(*pud))
7035 				pte = huge_pmd_share(mm, vma, addr, pud);
7036 			else
7037 				pte = (pte_t *)pmd_alloc(mm, pud, addr);
7038 		}
7039 	}
7040 
7041 	if (pte) {
7042 		pte_t pteval = ptep_get_lockless(pte);
7043 
7044 		BUG_ON(pte_present(pteval) && !pte_huge(pteval));
7045 	}
7046 
7047 	return pte;
7048 }
7049 
7050 /*
7051  * huge_pte_offset() - Walk the page table to resolve the hugepage
7052  * entry at address @addr
7053  *
7054  * Return: Pointer to page table entry (PUD or PMD) for
7055  * address @addr, or NULL if a !p*d_present() entry is encountered and the
7056  * size @sz doesn't match the hugepage size at this level of the page
7057  * table.
7058  */
7059 pte_t *huge_pte_offset(struct mm_struct *mm,
7060 		       unsigned long addr, unsigned long sz)
7061 {
7062 	pgd_t *pgd;
7063 	p4d_t *p4d;
7064 	pud_t *pud;
7065 	pmd_t *pmd;
7066 
7067 	pgd = pgd_offset(mm, addr);
7068 	if (!pgd_present(*pgd))
7069 		return NULL;
7070 	p4d = p4d_offset(pgd, addr);
7071 	if (!p4d_present(*p4d))
7072 		return NULL;
7073 
7074 	pud = pud_offset(p4d, addr);
7075 	if (sz == PUD_SIZE)
7076 		/* must be pud huge, non-present or none */
7077 		return (pte_t *)pud;
7078 	if (!pud_present(*pud))
7079 		return NULL;
7080 	/* must have a valid entry and size to go further */
7081 
7082 	pmd = pmd_offset(pud, addr);
7083 	/* must be pmd huge, non-present or none */
7084 	return (pte_t *)pmd;
7085 }
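
/*
 * Illustrative usage (mirroring callers elsewhere in this file): lookups are
 * normally done through hugetlb_walk() with the appropriate locks held, and
 * a non-NULL entry is locked before being inspected:
 *
 *	ptep = hugetlb_walk(vma, address, huge_page_size(h));
 *	if (ptep) {
 *		ptl = huge_pte_lock(h, mm, ptep);
 *		...
 *		spin_unlock(ptl);
 *	}
 */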
7086 
7087 /*
7088  * Return a mask that can be OR'd into an address to advance it to the last
7089  * huge page mapped by a single page table page.  Used to skip non-present
7090  * page table entries when linearly scanning address ranges.  Architectures
7091  * with unique huge page to page table relationships can define their own
7092  * version of this routine.
7093  */
7094 unsigned long hugetlb_mask_last_page(struct hstate *h)
7095 {
7096 	unsigned long hp_size = huge_page_size(h);
7097 
7098 	if (hp_size == PUD_SIZE)
7099 		return P4D_SIZE - PUD_SIZE;
7100 	else if (hp_size == PMD_SIZE)
7101 		return PUD_SIZE - PMD_SIZE;
7102 	else
7103 		return 0UL;
7104 }
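
/*
 * Illustrative sketch (based on the linear scanning loops elsewhere in this
 * file): when a walk finds no page table, OR-ing in the mask skips ahead to
 * the last huge page covered by the missing table, and the regular loop
 * increment then moves past it:
 *
 *	last_addr_mask = hugetlb_mask_last_page(h);
 *	for (; address < end; address += sz) {
 *		ptep = hugetlb_walk(vma, address, sz);
 *		if (!ptep) {
 *			address |= last_addr_mask;
 *			continue;
 *		}
 *		...
 *	}
 */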
7105 
7106 #else
7107 
7108 /* See description above.  Architectures can provide their own version. */
7109 __weak unsigned long hugetlb_mask_last_page(struct hstate *h)
7110 {
7111 #ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
7112 	if (huge_page_size(h) == PMD_SIZE)
7113 		return PUD_SIZE - PMD_SIZE;
7114 #endif
7115 	return 0UL;
7116 }
7117 
7118 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
7119 
7120 /**
7121  * folio_isolate_hugetlb - try to isolate an allocated hugetlb folio
7122  * @folio: the folio to isolate
7123  * @list: the list to add the folio to on success
7124  *
7125  * Isolate an allocated (refcount > 0) hugetlb folio, marking it as
7126  * isolated/non-migratable, and moving it from the active list to the
7127  * given list.
7128  *
7129  * Isolation will fail if @folio is not an allocated hugetlb folio, or if
7130  * it is already isolated/non-migratable.
7131  *
7132  * On success, an additional folio reference is taken that must be dropped
7133  * using folio_putback_hugetlb() to undo the isolation.
7134  *
7135  * Return: True if isolation worked, otherwise False.
7136  */
7137 bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list)
7138 {
7139 	bool ret = true;
7140 
7141 	spin_lock_irq(&hugetlb_lock);
7142 	if (!folio_test_hugetlb(folio) ||
7143 	    !folio_test_hugetlb_migratable(folio) ||
7144 	    !folio_try_get(folio)) {
7145 		ret = false;
7146 		goto unlock;
7147 	}
7148 	folio_clear_hugetlb_migratable(folio);
7149 	list_move_tail(&folio->lru, list);
7150 unlock:
7151 	spin_unlock_irq(&hugetlb_lock);
7152 	return ret;
7153 }
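
/*
 * Usage sketch (illustrative only; "migrated" is a hypothetical flag): a
 * migration-style caller isolates the folio onto a private list and, if it
 * ends up not migrating it, undoes the isolation:
 *
 *	LIST_HEAD(isolated);
 *
 *	if (folio_isolate_hugetlb(folio, &isolated)) {
 *		... attempt migration ...
 *		if (!migrated)
 *			folio_putback_hugetlb(folio);
 *	}
 */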
7154 
7155 int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
7156 {
7157 	int ret = 0;
7158 
7159 	*hugetlb = false;
7160 	spin_lock_irq(&hugetlb_lock);
7161 	if (folio_test_hugetlb(folio)) {
7162 		*hugetlb = true;
7163 		if (folio_test_hugetlb_freed(folio))
7164 			ret = 0;
7165 		else if (folio_test_hugetlb_migratable(folio) || unpoison)
7166 			ret = folio_try_get(folio);
7167 		else
7168 			ret = -EBUSY;
7169 	}
7170 	spin_unlock_irq(&hugetlb_lock);
7171 	return ret;
7172 }
7173 
7174 int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
7175 				bool *migratable_cleared)
7176 {
7177 	int ret;
7178 
7179 	spin_lock_irq(&hugetlb_lock);
7180 	ret = __get_huge_page_for_hwpoison(pfn, flags, migratable_cleared);
7181 	spin_unlock_irq(&hugetlb_lock);
7182 	return ret;
7183 }
7184 
7185 /**
7186  * folio_putback_hugetlb - unisolate a hugetlb folio
7187  * @folio: the isolated hugetlb folio
7188  *
7189  * Putback/un-isolate the hugetlb folio that was previously isolated using
7190  * folio_isolate_hugetlb(): marking it non-isolated/migratable and putting it
7191  * back onto the active list.
7192  *
7193  * Will drop the additional folio reference obtained through
7194  * folio_isolate_hugetlb().
7195  */
7196 void folio_putback_hugetlb(struct folio *folio)
7197 {
7198 	spin_lock_irq(&hugetlb_lock);
7199 	folio_set_hugetlb_migratable(folio);
7200 	list_move_tail(&folio->lru, &(folio_hstate(folio))->hugepage_activelist);
7201 	spin_unlock_irq(&hugetlb_lock);
7202 	folio_put(folio);
7203 }
7204 
7205 void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason)
7206 {
7207 	struct hstate *h = folio_hstate(old_folio);
7208 
7209 	hugetlb_cgroup_migrate(old_folio, new_folio);
7210 	folio_set_owner_migrate_reason(new_folio, reason);
7211 
7212 	/*
7213 	 * Transfer the temporary state of the new hugetlb folio. This is
7214 	 * the reverse of other transitions because the new folio is going
7215 	 * to be final while the old one will be freed, so it takes over
7216 	 * the temporary status.
7217 	 *
7218 	 * Also note that we have to transfer the per-node surplus state
7219 	 * here as well otherwise the global surplus count will not match
7220 	 * the per-node's.
7221 	 */
7222 	if (folio_test_hugetlb_temporary(new_folio)) {
7223 		int old_nid = folio_nid(old_folio);
7224 		int new_nid = folio_nid(new_folio);
7225 
7226 		folio_set_hugetlb_temporary(old_folio);
7227 		folio_clear_hugetlb_temporary(new_folio);
7228 
7229 
7230 		/*
7231 		 * There is no need to transfer the per-node surplus state
7232 		 * when we do not cross the node.
7233 		 */
7234 		if (new_nid == old_nid)
7235 			return;
7236 		spin_lock_irq(&hugetlb_lock);
7237 		if (h->surplus_huge_pages_node[old_nid]) {
7238 			h->surplus_huge_pages_node[old_nid]--;
7239 			h->surplus_huge_pages_node[new_nid]++;
7240 		}
7241 		spin_unlock_irq(&hugetlb_lock);
7242 	}
7243 
7244 	/*
7245 	 * Our old folio is isolated and has "migratable" cleared until it
7246 	 * is put back. As migration succeeded, set the new folio "migratable"
7247 	 * and add it to the active list.
7248 	 */
7249 	spin_lock_irq(&hugetlb_lock);
7250 	folio_set_hugetlb_migratable(new_folio);
7251 	list_move_tail(&new_folio->lru, &(folio_hstate(new_folio))->hugepage_activelist);
7252 	spin_unlock_irq(&hugetlb_lock);
7253 }
7254 
7255 /*
7256  * If @take_locks is false, the caller must ensure that no concurrent page table
7257  * access can happen (except for gup_fast() and hardware page walks).
7258  * If @take_locks is true, we take the hugetlb VMA lock (to lock out things like
7259  * concurrent page fault handling) and the file rmap lock.
7260  */
7261 static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
7262 				   unsigned long start,
7263 				   unsigned long end,
7264 				   bool take_locks)
7265 {
7266 	struct hstate *h = hstate_vma(vma);
7267 	unsigned long sz = huge_page_size(h);
7268 	struct mm_struct *mm = vma->vm_mm;
7269 	struct mmu_notifier_range range;
7270 	struct mmu_gather tlb;
7271 	unsigned long address;
7272 	spinlock_t *ptl;
7273 	pte_t *ptep;
7274 
7275 	if (!(vma->vm_flags & VM_MAYSHARE))
7276 		return;
7277 
7278 	if (start >= end)
7279 		return;
7280 
7281 	flush_cache_range(vma, start, end);
7282 	tlb_gather_mmu_vma(&tlb, vma);
7283 
7284 	/*
7285 	 * No need to call adjust_range_if_pmd_sharing_possible(), because
7286 	 * we have already done the PUD_SIZE alignment.
7287 	 */
7288 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
7289 				start, end);
7290 	mmu_notifier_invalidate_range_start(&range);
7291 	if (take_locks) {
7292 		hugetlb_vma_lock_write(vma);
7293 		i_mmap_lock_write(vma->vm_file->f_mapping);
7294 	} else {
7295 		i_mmap_assert_write_locked(vma->vm_file->f_mapping);
7296 	}
7297 	for (address = start; address < end; address += PUD_SIZE) {
7298 		ptep = hugetlb_walk(vma, address, sz);
7299 		if (!ptep)
7300 			continue;
7301 		ptl = huge_pte_lock(h, mm, ptep);
7302 		huge_pmd_unshare(&tlb, vma, address, ptep);
7303 		spin_unlock(ptl);
7304 	}
7305 	huge_pmd_unshare_flush(&tlb, vma);
7306 	if (take_locks) {
7307 		i_mmap_unlock_write(vma->vm_file->f_mapping);
7308 		hugetlb_vma_unlock_write(vma);
7309 	}
7310 	/*
7311 	 * No need to call mmu_notifier_arch_invalidate_secondary_tlbs(), see
7312 	 * Documentation/mm/mmu_notifier.rst.
7313 	 */
7314 	mmu_notifier_invalidate_range_end(&range);
7315 	tlb_finish_mmu(&tlb);
7316 }
7317 
7318 /*
7319  * This function will unconditionally remove all the shared pmd pgtable entries
7320  * within the specific vma for a hugetlbfs memory range.
7321  */
7322 void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
7323 {
7324 	hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
7325 			ALIGN_DOWN(vma->vm_end, PUD_SIZE),
7326 			/* take_locks = */ true);
7327 }
7328 
7329 /*
7330  * For hugetlb, mremap() is an odd edge case - while the VMA copying is
7331  * performed, we permit both the old and new VMAs to reference the same
7332  * reservation.
7333  *
7334  * We fix this up after the operation succeeds, or if a newly allocated VMA
7335  * is closed as a result of a failure to allocate memory.
7336  */
7337 void fixup_hugetlb_reservations(struct vm_area_struct *vma)
7338 {
7339 	if (is_vm_hugetlb_page(vma))
7340 		clear_vma_resv_huge_pages(vma);
7341 }
7342