1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Generic hugetlb support.
4 * (C) Nadia Yvette Chambers, April 2004
5 */
6 #include <linux/list.h>
7 #include <linux/init.h>
8 #include <linux/mm.h>
9 #include <linux/seq_file.h>
10 #include <linux/highmem.h>
11 #include <linux/mmu_notifier.h>
12 #include <linux/nodemask.h>
13 #include <linux/pagemap.h>
14 #include <linux/mempolicy.h>
15 #include <linux/compiler.h>
16 #include <linux/cpumask.h>
17 #include <linux/cpuset.h>
18 #include <linux/mutex.h>
19 #include <linux/memblock.h>
20 #include <linux/minmax.h>
21 #include <linux/slab.h>
22 #include <linux/sched/mm.h>
23 #include <linux/mmdebug.h>
24 #include <linux/sched/signal.h>
25 #include <linux/rmap.h>
26 #include <linux/string_choices.h>
27 #include <linux/string_helpers.h>
28 #include <linux/swap.h>
29 #include <linux/leafops.h>
30 #include <linux/jhash.h>
31 #include <linux/numa.h>
32 #include <linux/llist.h>
33 #include <linux/cma.h>
34 #include <linux/migrate.h>
35 #include <linux/nospec.h>
36 #include <linux/delayacct.h>
37 #include <linux/memory.h>
38 #include <linux/mm_inline.h>
39 #include <linux/padata.h>
40 #include <linux/pgalloc.h>
41
42 #include <asm/page.h>
43 #include <asm/tlb.h>
44 #include <asm/setup.h>
45
46 #include <linux/io.h>
47 #include <linux/node.h>
48 #include <linux/page_owner.h>
49 #include "internal.h"
50 #include "hugetlb_vmemmap.h"
51 #include "hugetlb_cma.h"
52 #include "hugetlb_internal.h"
53 #include <linux/page-isolation.h>
54
55 int hugetlb_max_hstate __read_mostly;
56 unsigned int default_hstate_idx;
57 struct hstate hstates[HUGE_MAX_HSTATE];
58
59 __initdata nodemask_t hugetlb_bootmem_nodes;
60 __initdata struct list_head huge_boot_pages[MAX_NUMNODES];
61 static unsigned long hstate_boot_nrinvalid[HUGE_MAX_HSTATE] __initdata;
62
63 /*
64 * Due to ordering constraints across the init code for various
65 * architectures, hugetlb hstate cmdline parameters can't simply
66 * be early_param. early_param might call the setup function
67 * before valid hugetlb page sizes are determined, leading to
68 * incorrect rejection of valid hugepagesz= options.
69 *
70 * So, record the parameters early and consume them whenever the
71 * init code is ready for them, by calling hugetlb_parse_params().
72 */
73
74 /* one (hugepagesz=,hugepages=) pair per hstate, one default_hugepagesz */
75 #define HUGE_MAX_CMDLINE_ARGS (2 * HUGE_MAX_HSTATE + 1)
76 struct hugetlb_cmdline {
77 char *val;
78 int (*setup)(char *val);
79 };
80
81 /* for command line parsing */
82 static struct hstate * __initdata parsed_hstate;
83 static unsigned long __initdata default_hstate_max_huge_pages;
84 static bool __initdata parsed_valid_hugepagesz = true;
85 static bool __initdata parsed_default_hugepagesz;
86 static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;
87 static unsigned long hugepage_allocation_threads __initdata;
88
89 static char hstate_cmdline_buf[COMMAND_LINE_SIZE] __initdata;
90 static int hstate_cmdline_index __initdata;
91 static struct hugetlb_cmdline hugetlb_params[HUGE_MAX_CMDLINE_ARGS] __initdata;
92 static int hugetlb_param_index __initdata;
93 static __init int hugetlb_add_param(char *s, int (*setup)(char *val));
94 static __init void hugetlb_parse_params(void);
95
96 #define hugetlb_early_param(str, func) \
97 static __init int func##args(char *s) \
98 { \
99 return hugetlb_add_param(s, func); \
100 } \
101 early_param(str, func##args)
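/*
 * Illustrative expansion (editor's sketch, not part of the source): for a
 * hypothetical handler hugepagesz_setup(), the invocation
 * hugetlb_early_param("hugepagesz", hugepagesz_setup) expands to roughly:
 *
 *	static __init int hugepagesz_setupargs(char *s)
 *	{
 *		return hugetlb_add_param(s, hugepagesz_setup);
 *	}
 *	early_param("hugepagesz", hugepagesz_setupargs);
 *
 * so the raw string is only recorded at early_param time and the real
 * parsing is deferred to hugetlb_parse_params().
 */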
102
103 /*
104 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
105 * free_huge_pages, and surplus_huge_pages.
106 */
107 __cacheline_aligned_in_smp DEFINE_SPINLOCK(hugetlb_lock);
108
109 /*
110 * Serializes faults on the same logical page. This is used to
111 * prevent spurious OOMs when the hugepage pool is fully utilized.
112 */
113 static int num_fault_mutexes __ro_after_init;
114 struct mutex *hugetlb_fault_mutex_table __ro_after_init;
115
116 /* Forward declaration */
117 static int hugetlb_acct_memory(struct hstate *h, long delta);
118 static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
119 static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
120 static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
121 unsigned long start, unsigned long end, bool take_locks);
122 static struct resv_map *vma_resv_map(struct vm_area_struct *vma);
123
124 static void hugetlb_free_folio(struct folio *folio)
125 {
126 if (folio_test_hugetlb_cma(folio)) {
127 hugetlb_cma_free_folio(folio);
128 return;
129 }
130
131 folio_put(folio);
132 }
133
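/*
 * A subpool may be released once nothing uses it: no handles remain
 * (count == 0), no pages are charged against a maximum size limit, and the
 * minimum size reservation (if any) has been fully returned, i.e. rsv_hpages
 * is back to min_hpages.
 */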
134 static inline bool subpool_is_free(struct hugepage_subpool *spool)
135 {
136 if (spool->count)
137 return false;
138 if (spool->max_hpages != -1)
139 return spool->used_hpages == 0;
140 if (spool->min_hpages != -1)
141 return spool->rsv_hpages == spool->min_hpages;
142
143 return true;
144 }
145
146 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
147 unsigned long irq_flags)
148 {
149 spin_unlock_irqrestore(&spool->lock, irq_flags);
150
151 /* If no pages are used, and no other handles to the subpool
152 * remain, give up any reservations based on minimum size and
153 * free the subpool */
154 if (subpool_is_free(spool)) {
155 if (spool->min_hpages != -1)
156 hugetlb_acct_memory(spool->hstate,
157 -spool->min_hpages);
158 kfree(spool);
159 }
160 }
161
162 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
163 long min_hpages)
164 {
165 struct hugepage_subpool *spool;
166
167 spool = kzalloc(sizeof(*spool), GFP_KERNEL);
168 if (!spool)
169 return NULL;
170
171 spin_lock_init(&spool->lock);
172 spool->count = 1;
173 spool->max_hpages = max_hpages;
174 spool->hstate = h;
175 spool->min_hpages = min_hpages;
176
177 if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
178 kfree(spool);
179 return NULL;
180 }
181 spool->rsv_hpages = min_hpages;
182
183 return spool;
184 }
185
186 void hugepage_put_subpool(struct hugepage_subpool *spool)
187 {
188 unsigned long flags;
189
190 spin_lock_irqsave(&spool->lock, flags);
191 BUG_ON(!spool->count);
192 spool->count--;
193 unlock_or_release_subpool(spool, flags);
194 }
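/*
 * Illustrative lifetime sketch (editor's example, not taken from this file;
 * error handling elided): a user such as hugetlbfs typically creates one
 * subpool per mount and drops its reference when the mount goes away.
 *
 *	struct hugepage_subpool *spool;
 *
 *	spool = hugepage_new_subpool(h, max_hpages, min_hpages);
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);	// may free spool and drop its min reservation
 */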
195
196 /*
197 * Subpool accounting for allocating and reserving pages.
198 * Return -ENOMEM if there are not enough resources to satisfy the
199 * request. Otherwise, return the number of pages by which the
200 * global pools must be adjusted (upward). The returned value may
201 * only be different than the passed value (delta) in the case where
202 * a subpool minimum size must be maintained.
203 */
204 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
205 long delta)
206 {
207 long ret = delta;
208
209 if (!spool)
210 return ret;
211
212 spin_lock_irq(&spool->lock);
213
214 if (spool->max_hpages != -1) { /* maximum size accounting */
215 if ((spool->used_hpages + delta) <= spool->max_hpages)
216 spool->used_hpages += delta;
217 else {
218 ret = -ENOMEM;
219 goto unlock_ret;
220 }
221 }
222
223 /* minimum size accounting */
224 if (spool->min_hpages != -1 && spool->rsv_hpages) {
225 if (delta > spool->rsv_hpages) {
226 /*
227 * Asking for more reserves than those already taken on
228 * behalf of subpool. Return difference.
229 */
230 ret = delta - spool->rsv_hpages;
231 spool->rsv_hpages = 0;
232 } else {
233 ret = 0; /* reserves already accounted for */
234 spool->rsv_hpages -= delta;
235 }
236 }
237
238 unlock_ret:
239 spin_unlock_irq(&spool->lock);
240 return ret;
241 }
242
243 /*
244 * Subpool accounting for freeing and unreserving pages.
245 * Return the number of global page reservations that must be dropped.
246 * The return value may only be different than the passed value (delta)
247 * in the case where a subpool minimum size must be maintained.
248 */
249 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
250 long delta)
251 {
252 long ret = delta;
253 unsigned long flags;
254
255 if (!spool)
256 return delta;
257
258 spin_lock_irqsave(&spool->lock, flags);
259
260 if (spool->max_hpages != -1) /* maximum size accounting */
261 spool->used_hpages -= delta;
262
263 /* minimum size accounting */
264 if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
265 if (spool->rsv_hpages + delta <= spool->min_hpages)
266 ret = 0;
267 else
268 ret = spool->rsv_hpages + delta - spool->min_hpages;
269
270 spool->rsv_hpages += delta;
271 if (spool->rsv_hpages > spool->min_hpages)
272 spool->rsv_hpages = spool->min_hpages;
273 }
274
275 /*
276 * If hugetlbfs_put_super couldn't free spool due to an outstanding
277 * quota reference, free it now.
278 */
279 unlock_or_release_subpool(spool, flags);
280
281 return ret;
282 }
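/*
 * Worked example (editor's illustration): a subpool created with
 * min_hpages = 10 and no maximum starts with rsv_hpages = 10, i.e. 10 pages
 * already reserved in the global pool on its behalf. A get of 3 pages is
 * covered by that reservation, so hugepage_subpool_get_pages() returns 0 and
 * leaves rsv_hpages = 7. A later get of 12 pages returns 12 - 7 = 5 and sets
 * rsv_hpages = 0, since only 5 of those pages are not covered by the subpool
 * minimum. As pages are returned, hugepage_subpool_put_pages() refills
 * rsv_hpages back up to min_hpages and only reports the excess as global
 * reservations that may be dropped.
 */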
283
284 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
285 {
286 return subpool_inode(file_inode(vma->vm_file));
287 }
288
289 /*
290 * hugetlb vma_lock helper routines
291 */
292 void hugetlb_vma_lock_read(struct vm_area_struct *vma)
293 {
294 if (__vma_shareable_lock(vma)) {
295 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
296
297 down_read(&vma_lock->rw_sema);
298 } else if (__vma_private_lock(vma)) {
299 struct resv_map *resv_map = vma_resv_map(vma);
300
301 down_read(&resv_map->rw_sema);
302 }
303 }
304
305 void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
306 {
307 if (__vma_shareable_lock(vma)) {
308 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
309
310 up_read(&vma_lock->rw_sema);
311 } else if (__vma_private_lock(vma)) {
312 struct resv_map *resv_map = vma_resv_map(vma);
313
314 up_read(&resv_map->rw_sema);
315 }
316 }
317
318 void hugetlb_vma_lock_write(struct vm_area_struct *vma)
319 {
320 if (__vma_shareable_lock(vma)) {
321 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
322
323 down_write(&vma_lock->rw_sema);
324 } else if (__vma_private_lock(vma)) {
325 struct resv_map *resv_map = vma_resv_map(vma);
326
327 down_write(&resv_map->rw_sema);
328 }
329 }
330
331 void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
332 {
333 if (__vma_shareable_lock(vma)) {
334 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
335
336 up_write(&vma_lock->rw_sema);
337 } else if (__vma_private_lock(vma)) {
338 struct resv_map *resv_map = vma_resv_map(vma);
339
340 up_write(&resv_map->rw_sema);
341 }
342 }
343
344 int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
345 {
346
347 if (__vma_shareable_lock(vma)) {
348 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
349
350 return down_write_trylock(&vma_lock->rw_sema);
351 } else if (__vma_private_lock(vma)) {
352 struct resv_map *resv_map = vma_resv_map(vma);
353
354 return down_write_trylock(&resv_map->rw_sema);
355 }
356
357 return 1;
358 }
359
360 void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
361 {
362 if (__vma_shareable_lock(vma)) {
363 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
364
365 lockdep_assert_held(&vma_lock->rw_sema);
366 } else if (__vma_private_lock(vma)) {
367 struct resv_map *resv_map = vma_resv_map(vma);
368
369 lockdep_assert_held(&resv_map->rw_sema);
370 }
371 }
372
373 void hugetlb_vma_lock_release(struct kref *kref)
374 {
375 struct hugetlb_vma_lock *vma_lock = container_of(kref,
376 struct hugetlb_vma_lock, refs);
377
378 kfree(vma_lock);
379 }
380
381 static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
382 {
383 struct vm_area_struct *vma = vma_lock->vma;
384
385 /*
386 * vma_lock structure may or may not be released as a result of put,
387 * it certainly will no longer be attached to vma so clear pointer.
388 * Semaphore synchronizes access to vma_lock->vma field.
389 */
390 vma_lock->vma = NULL;
391 vma->vm_private_data = NULL;
392 up_write(&vma_lock->rw_sema);
393 kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
394 }
395
396 static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
397 {
398 if (__vma_shareable_lock(vma)) {
399 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
400
401 __hugetlb_vma_unlock_write_put(vma_lock);
402 } else if (__vma_private_lock(vma)) {
403 struct resv_map *resv_map = vma_resv_map(vma);
404
405 /* no free for anon vmas, but still need to unlock */
406 up_write(&resv_map->rw_sema);
407 }
408 }
409
410 static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
411 {
412 /*
413 * Only present in sharable vmas.
414 */
415 if (!vma || !__vma_shareable_lock(vma))
416 return;
417
418 if (vma->vm_private_data) {
419 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
420
421 down_write(&vma_lock->rw_sema);
422 __hugetlb_vma_unlock_write_put(vma_lock);
423 }
424 }
425
426 /*
427 * vma specific semaphore used for pmd sharing and fault/truncation
428 * synchronization
429 */
430 int hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
431 {
432 struct hugetlb_vma_lock *vma_lock;
433
434 /* Only establish in (flags) sharable vmas */
435 if (!vma || !(vma->vm_flags & VM_MAYSHARE))
436 return 0;
437
438 /* Should never get here with non-NULL vm_private_data */
439 if (vma->vm_private_data)
440 return -EINVAL;
441
442 vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
443 if (!vma_lock) {
444 /*
445 * If we cannot allocate the structure, then the vma cannot
446 * participate in pmd sharing. This only costs a possible
447 * performance enhancement and memory saving.
448 * However, the lock is also used to synchronize page
449 * faults with truncation. If the lock is not present,
450 * unlikely races could leave pages in a file past i_size
451 * until the file is removed. Warn in the unlikely case of
452 * allocation failure.
453 */
454 pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
455 return -EINVAL;
456 }
457
458 kref_init(&vma_lock->refs);
459 init_rwsem(&vma_lock->rw_sema);
460 vma_lock->vma = vma;
461 vma->vm_private_data = vma_lock;
462
463 return 0;
464 }
465
466 /* Helper that removes a struct file_region from the resv_map cache and returns
467 * it for use.
468 */
469 static struct file_region *
470 get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
471 {
472 struct file_region *nrg;
473
474 VM_BUG_ON(resv->region_cache_count <= 0);
475
476 resv->region_cache_count--;
477 nrg = list_first_entry(&resv->region_cache, struct file_region, link);
478 list_del(&nrg->link);
479
480 nrg->from = from;
481 nrg->to = to;
482
483 return nrg;
484 }
485
486 static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
487 struct file_region *rg)
488 {
489 #ifdef CONFIG_CGROUP_HUGETLB
490 nrg->reservation_counter = rg->reservation_counter;
491 nrg->css = rg->css;
492 if (rg->css)
493 css_get(rg->css);
494 #endif
495 }
496
497 /* Helper that records hugetlb_cgroup uncharge info. */
498 static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
499 struct hstate *h,
500 struct resv_map *resv,
501 struct file_region *nrg)
502 {
503 #ifdef CONFIG_CGROUP_HUGETLB
504 if (h_cg) {
505 nrg->reservation_counter =
506 &h_cg->rsvd_hugepage[hstate_index(h)];
507 nrg->css = &h_cg->css;
508 /*
509 * The caller will hold exactly one h_cg->css reference for the
510 * whole contiguous reservation region. But this area might be
511 * scattered when there are already some file_regions residing in
512 * it. As a result, many file_regions may share only one css
513 * reference. In order to ensure that one file_region must hold
514 * exactly one h_cg->css reference, we should do css_get for
515 * each file_region and leave the reference held by caller
516 * untouched.
517 */
518 css_get(&h_cg->css);
519 if (!resv->pages_per_hpage)
520 resv->pages_per_hpage = pages_per_huge_page(h);
521 /* pages_per_hpage should be the same for all entries in
522 * a resv_map.
523 */
524 VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
525 } else {
526 nrg->reservation_counter = NULL;
527 nrg->css = NULL;
528 }
529 #endif
530 }
531
532 static void put_uncharge_info(struct file_region *rg)
533 {
534 #ifdef CONFIG_CGROUP_HUGETLB
535 if (rg->css)
536 css_put(rg->css);
537 #endif
538 }
539
540 static bool has_same_uncharge_info(struct file_region *rg,
541 struct file_region *org)
542 {
543 #ifdef CONFIG_CGROUP_HUGETLB
544 return rg->reservation_counter == org->reservation_counter &&
545 rg->css == org->css;
546
547 #else
548 return true;
549 #endif
550 }
551
552 static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
553 {
554 struct file_region *nrg, *prg;
555
556 prg = list_prev_entry(rg, link);
557 if (&prg->link != &resv->regions && prg->to == rg->from &&
558 has_same_uncharge_info(prg, rg)) {
559 prg->to = rg->to;
560
561 list_del(&rg->link);
562 put_uncharge_info(rg);
563 kfree(rg);
564
565 rg = prg;
566 }
567
568 nrg = list_next_entry(rg, link);
569 if (&nrg->link != &resv->regions && nrg->from == rg->to &&
570 has_same_uncharge_info(nrg, rg)) {
571 nrg->from = rg->from;
572
573 list_del(&rg->link);
574 put_uncharge_info(rg);
575 kfree(rg);
576 }
577 }
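/*
 * Example (editor's illustration): with regions [2,5) and [7,9) already in
 * the map and identical cgroup uncharge info, inserting [5,7) lets
 * coalesce_file_region() first merge with the predecessor into [2,7) and
 * then with the successor into a single region [2,9).
 */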
578
579 static inline long
580 hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from,
581 long to, struct hstate *h, struct hugetlb_cgroup *cg,
582 long *regions_needed)
583 {
584 struct file_region *nrg;
585
586 if (!regions_needed) {
587 nrg = get_file_region_entry_from_cache(map, from, to);
588 record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
589 list_add(&nrg->link, rg);
590 coalesce_file_region(map, nrg);
591 } else
592 *regions_needed += 1;
593
594 return to - from;
595 }
596
597 /*
598 * Must be called with resv->lock held.
599 *
600 * Calling this with regions_needed != NULL will count the number of pages
601 * to be added, but will not modify the linked list. In that case, regions_needed
602 * will indicate the number of file_regions needed in the cache to add the
603 * regions for this range.
604 */
605 static long add_reservation_in_range(struct resv_map *resv, long f, long t,
606 struct hugetlb_cgroup *h_cg,
607 struct hstate *h, long *regions_needed)
608 {
609 long add = 0;
610 struct list_head *head = &resv->regions;
611 long last_accounted_offset = f;
612 struct file_region *iter, *trg = NULL;
613 struct list_head *rg = NULL;
614
615 if (regions_needed)
616 *regions_needed = 0;
617
618 /* In this loop, we essentially handle an entry for the range
619 * [last_accounted_offset, iter->from), at every iteration, with some
620 * bounds checking.
621 */
622 list_for_each_entry_safe(iter, trg, head, link) {
623 /* Skip irrelevant regions that start before our range. */
624 if (iter->from < f) {
625 /* If this region ends after the last accounted offset,
626 * then we need to update last_accounted_offset.
627 */
628 if (iter->to > last_accounted_offset)
629 last_accounted_offset = iter->to;
630 continue;
631 }
632
633 /* When we find a region that starts beyond our range, we've
634 * finished.
635 */
636 if (iter->from >= t) {
637 rg = iter->link.prev;
638 break;
639 }
640
641 /* Add an entry for last_accounted_offset -> iter->from, and
642 * update last_accounted_offset.
643 */
644 if (iter->from > last_accounted_offset)
645 add += hugetlb_resv_map_add(resv, iter->link.prev,
646 last_accounted_offset,
647 iter->from, h, h_cg,
648 regions_needed);
649
650 last_accounted_offset = iter->to;
651 }
652
653 /* Handle the case where our range extends beyond
654 * last_accounted_offset.
655 */
656 if (!rg)
657 rg = head->prev;
658 if (last_accounted_offset < t)
659 add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
660 t, h, h_cg, regions_needed);
661
662 return add;
663 }
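/*
 * Worked example (editor's illustration): for a reserve map already holding
 * [3,5) and [8,10), add_reservation_in_range(resv, 0, 12, ...) accounts for
 * the gaps [0,3), [5,8) and [10,12) and returns 3 + 3 + 2 = 8. With
 * regions_needed non-NULL it only reports that three file_region entries
 * would be required; with regions_needed == NULL it pulls those entries from
 * the cache and links them into the map.
 */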
664
665 /* Must be called with resv->lock acquired. Will drop lock to allocate entries.
666 */
667 static int allocate_file_region_entries(struct resv_map *resv,
668 int regions_needed)
669 __must_hold(&resv->lock)
670 {
671 LIST_HEAD(allocated_regions);
672 int to_allocate = 0, i = 0;
673 struct file_region *trg = NULL, *rg = NULL;
674
675 VM_BUG_ON(regions_needed < 0);
676
677 /*
678 * Check for sufficient descriptors in the cache to accommodate
679 * the number of in progress add operations plus regions_needed.
680 *
681 * This is a while loop because when we drop the lock, some other call
682 * to region_add or region_del may have consumed some region_entries,
683 * so we keep looping here until we finally have enough entries for
684 * (adds_in_progress + regions_needed).
685 */
686 while (resv->region_cache_count <
687 (resv->adds_in_progress + regions_needed)) {
688 to_allocate = resv->adds_in_progress + regions_needed -
689 resv->region_cache_count;
690
691 /* At this point, we should have enough entries in the cache
692 * for all the existing adds_in_progress. We should only be
693 * needing to allocate for regions_needed.
694 */
695 VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);
696
697 spin_unlock(&resv->lock);
698 for (i = 0; i < to_allocate; i++) {
699 trg = kmalloc(sizeof(*trg), GFP_KERNEL);
700 if (!trg)
701 goto out_of_memory;
702 list_add(&trg->link, &allocated_regions);
703 }
704
705 spin_lock(&resv->lock);
706
707 list_splice(&allocated_regions, &resv->region_cache);
708 resv->region_cache_count += to_allocate;
709 }
710
711 return 0;
712
713 out_of_memory:
714 list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
715 list_del(&rg->link);
716 kfree(rg);
717 }
718 return -ENOMEM;
719 }
720
721 /*
722 * Add the huge page range represented by [f, t) to the reserve
723 * map. Regions will be taken from the cache to fill in this range.
724 * Sufficient regions should exist in the cache due to the previous
725 * call to region_chg with the same range, but in some cases the cache will not
726 * have sufficient entries due to races with other code doing region_add or
727 * region_del. The extra needed entries will be allocated.
728 *
729 * regions_needed is the out value provided by a previous call to region_chg.
730 *
731 * Return the number of new huge pages added to the map. This number is greater
732 * than or equal to zero. If file_region entries needed to be allocated for
733 * this operation and we were not able to allocate, it returns -ENOMEM.
734 * region_add of regions of length 1 never allocate file_regions and cannot
735 * fail; region_chg will always allocate at least 1 entry and a region_add for
736 * 1 page will only require at most 1 entry.
737 */
738 static long region_add(struct resv_map *resv, long f, long t,
739 long in_regions_needed, struct hstate *h,
740 struct hugetlb_cgroup *h_cg)
741 {
742 long add = 0, actual_regions_needed = 0;
743
744 spin_lock(&resv->lock);
745 retry:
746
747 /* Count how many regions are actually needed to execute this add. */
748 add_reservation_in_range(resv, f, t, NULL, NULL,
749 &actual_regions_needed);
750
751 /*
752 * Check for sufficient descriptors in the cache to accommodate
753 * this add operation. Note that actual_regions_needed may be greater
754 * than in_regions_needed, as the resv_map may have been modified since
755 * the region_chg call. In this case, we need to make sure that we
756 * allocate extra entries, such that we have enough for all the
757 * existing adds_in_progress, plus the excess needed for this
758 * operation.
759 */
760 if (actual_regions_needed > in_regions_needed &&
761 resv->region_cache_count <
762 resv->adds_in_progress +
763 (actual_regions_needed - in_regions_needed)) {
764 /* region_add operation of range 1 should never need to
765 * allocate file_region entries.
766 */
767 VM_BUG_ON(t - f <= 1);
768
769 if (allocate_file_region_entries(
770 resv, actual_regions_needed - in_regions_needed)) {
771 return -ENOMEM;
772 }
773
774 goto retry;
775 }
776
777 add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);
778
779 resv->adds_in_progress -= in_regions_needed;
780
781 spin_unlock(&resv->lock);
782 return add;
783 }
784
785 /*
786 * Examine the existing reserve map and determine how many
787 * huge pages in the specified range [f, t) are NOT currently
788 * represented. This routine is called before a subsequent
789 * call to region_add that will actually modify the reserve
790 * map to add the specified range [f, t). region_chg does
791 * not change the number of huge pages represented by the
792 * map. A number of new file_region structures are added to the cache as
793 * placeholders for the subsequent region_add call to use. At least 1
794 * file_region structure is added.
795 *
796 * out_regions_needed is the number of regions added to the
797 * resv->adds_in_progress. This value needs to be provided to a follow up call
798 * to region_add or region_abort for proper accounting.
799 *
800 * Returns the number of huge pages that need to be added to the existing
801 * reservation map for the range [f, t). This number is greater or equal to
802 * zero. -ENOMEM is returned if a new file_region structure or cache entry
803 * is needed and can not be allocated.
804 */
805 static long region_chg(struct resv_map *resv, long f, long t,
806 long *out_regions_needed)
807 {
808 long chg = 0;
809
810 spin_lock(&resv->lock);
811
812 /* Count how many hugepages in this range are NOT represented. */
813 chg = add_reservation_in_range(resv, f, t, NULL, NULL,
814 out_regions_needed);
815
816 if (*out_regions_needed == 0)
817 *out_regions_needed = 1;
818
819 if (allocate_file_region_entries(resv, *out_regions_needed))
820 return -ENOMEM;
821
822 resv->adds_in_progress += *out_regions_needed;
823
824 spin_unlock(&resv->lock);
825 return chg;
826 }
827
828 /*
829 * Abort the in progress add operation. The adds_in_progress field
830 * of the resv_map keeps track of the operations in progress between
831 * calls to region_chg and region_add. Operations are sometimes
832 * aborted after the call to region_chg. In such cases, region_abort
833 * is called to decrement the adds_in_progress counter. regions_needed
834 * is the value returned by the region_chg call; it is used to decrement
835 * the adds_in_progress counter.
836 *
837 * NOTE: The range arguments [f, t) are not needed or used in this
838 * routine. They are kept to make reading the calling code easier as
839 * arguments will match the associated region_chg call.
840 */
841 static void region_abort(struct resv_map *resv, long f, long t,
842 long regions_needed)
843 {
844 spin_lock(&resv->lock);
845 VM_BUG_ON(!resv->region_cache_count);
846 resv->adds_in_progress -= regions_needed;
847 spin_unlock(&resv->lock);
848 }
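/*
 * Typical two-step calling pattern (editor's sketch; names like
 * "later_step_failed" are placeholders and error handling is reduced to the
 * abort path). The reservation is prepared with region_chg() so that the
 * needed file_region entries exist before the final commit with region_add().
 *
 *	long regions_needed, chg;
 *
 *	chg = region_chg(resv, f, t, &regions_needed);
 *	if (chg < 0)
 *		return chg;
 *	// ... charge cgroup/subpool based on chg ...
 *	if (later_step_failed) {
 *		region_abort(resv, f, t, regions_needed);
 *		return -ENOMEM;
 *	}
 *	chg = region_add(resv, f, t, regions_needed, h, h_cg);
 */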
849
850 /*
851 * Delete the specified range [f, t) from the reserve map. If the
852 * t parameter is LONG_MAX, this indicates that ALL regions after f
853 * should be deleted. Locate the regions which intersect [f, t)
854 * and either trim, delete or split the existing regions.
855 *
856 * Returns the number of huge pages deleted from the reserve map.
857 * In the normal case, the return value is zero or more. In the
858 * case where a region must be split, a new region descriptor must
859 * be allocated. If the allocation fails, -ENOMEM will be returned.
860 * NOTE: If the parameter t == LONG_MAX, then we will never split
861 * a region and possibly return -ENOMEM. Callers specifying
862 * t == LONG_MAX do not need to check for -ENOMEM error.
863 */
864 static long region_del(struct resv_map *resv, long f, long t)
865 {
866 struct list_head *head = &resv->regions;
867 struct file_region *rg, *trg;
868 struct file_region *nrg = NULL;
869 long del = 0;
870
871 retry:
872 spin_lock(&resv->lock);
873 list_for_each_entry_safe(rg, trg, head, link) {
874 /*
875 * Skip regions before the range to be deleted. file_region
876 * ranges are normally of the form [from, to). However, there
877 * may be a "placeholder" entry in the map which is of the form
878 * (from, to) with from == to. Check for placeholder entries
879 * at the beginning of the range to be deleted.
880 */
881 if (rg->to <= f && (rg->to != rg->from || rg->to != f))
882 continue;
883
884 if (rg->from >= t)
885 break;
886
887 if (f > rg->from && t < rg->to) { /* Must split region */
888 /*
889 * Check for an entry in the cache before dropping
890 * lock and attempting allocation.
891 */
892 if (!nrg &&
893 resv->region_cache_count > resv->adds_in_progress) {
894 nrg = list_first_entry(&resv->region_cache,
895 struct file_region,
896 link);
897 list_del(&nrg->link);
898 resv->region_cache_count--;
899 }
900
901 if (!nrg) {
902 spin_unlock(&resv->lock);
903 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
904 if (!nrg)
905 return -ENOMEM;
906 goto retry;
907 }
908
909 del += t - f;
910 hugetlb_cgroup_uncharge_file_region(
911 resv, rg, t - f, false);
912
913 /* New entry for end of split region */
914 nrg->from = t;
915 nrg->to = rg->to;
916
917 copy_hugetlb_cgroup_uncharge_info(nrg, rg);
918
919 INIT_LIST_HEAD(&nrg->link);
920
921 /* Original entry is trimmed */
922 rg->to = f;
923
924 list_add(&nrg->link, &rg->link);
925 nrg = NULL;
926 break;
927 }
928
929 if (f <= rg->from && t >= rg->to) { /* Remove entire region */
930 del += rg->to - rg->from;
931 hugetlb_cgroup_uncharge_file_region(resv, rg,
932 rg->to - rg->from, true);
933 list_del(&rg->link);
934 kfree(rg);
935 continue;
936 }
937
938 if (f <= rg->from) { /* Trim beginning of region */
939 hugetlb_cgroup_uncharge_file_region(resv, rg,
940 t - rg->from, false);
941
942 del += t - rg->from;
943 rg->from = t;
944 } else { /* Trim end of region */
945 hugetlb_cgroup_uncharge_file_region(resv, rg,
946 rg->to - f, false);
947
948 del += rg->to - f;
949 rg->to = f;
950 }
951 }
952
953 spin_unlock(&resv->lock);
954 kfree(nrg);
955 return del;
956 }
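/*
 * Worked example (editor's illustration): deleting [4,6) from a map holding
 * the single region [2,8) requires a split. The existing entry is trimmed to
 * [2,4), a new file_region [6,8) is inserted after it, and region_del()
 * returns 2, the number of huge pages removed from the map.
 */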
957
958 /*
959 * A rare out of memory error was encountered which prevented removal of
960 * the reserve map region for a page. The huge page itself was freed
961 * and removed from the page cache. This routine will adjust the subpool
962 * usage count, and the global reserve count if needed. By incrementing
963 * these counts, the reserve map entry which could not be deleted will
964 * appear as a "reserved" entry instead of simply dangling with incorrect
965 * counts.
966 */
967 void hugetlb_fix_reserve_counts(struct inode *inode)
968 {
969 struct hugepage_subpool *spool = subpool_inode(inode);
970 long rsv_adjust;
971 bool reserved = false;
972
973 rsv_adjust = hugepage_subpool_get_pages(spool, 1);
974 if (rsv_adjust > 0) {
975 struct hstate *h = hstate_inode(inode);
976
977 if (!hugetlb_acct_memory(h, 1))
978 reserved = true;
979 } else if (!rsv_adjust) {
980 reserved = true;
981 }
982
983 if (!reserved)
984 pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
985 }
986
987 /*
988 * Count and return the number of huge pages in the reserve map
989 * that intersect with the range [f, t).
990 */
991 static long region_count(struct resv_map *resv, long f, long t)
992 {
993 struct list_head *head = &resv->regions;
994 struct file_region *rg;
995 long chg = 0;
996
997 spin_lock(&resv->lock);
998 /* Locate each segment we overlap with, and count that overlap. */
999 list_for_each_entry(rg, head, link) {
1000 long seg_from;
1001 long seg_to;
1002
1003 if (rg->to <= f)
1004 continue;
1005 if (rg->from >= t)
1006 break;
1007
1008 seg_from = max(rg->from, f);
1009 seg_to = min(rg->to, t);
1010
1011 chg += seg_to - seg_from;
1012 }
1013 spin_unlock(&resv->lock);
1014
1015 return chg;
1016 }
1017
1018 /*
1019 * Convert the address within this vma to the page offset within
1020 * the mapping, huge page units here.
1021 */
1022 static pgoff_t vma_hugecache_offset(struct hstate *h,
1023 struct vm_area_struct *vma, unsigned long address)
1024 {
1025 return ((address - vma->vm_start) >> huge_page_shift(h)) +
1026 (vma->vm_pgoff >> huge_page_order(h));
1027 }
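/*
 * Example (editor's illustration, assuming 2MB huge pages and a 4KB base
 * page size): for a vma with vm_start = 0x40000000 and vm_pgoff = 0x400
 * (a 4MB file offset in base page units), address 0x40600000 yields
 * ((0x40600000 - 0x40000000) >> 21) + (0x400 >> 9) = 3 + 2 = 5 huge page
 * units into the mapping.
 */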
1028
1029 /**
1030 * vma_kernel_pagesize - Page size granularity for this VMA.
1031 * @vma: The user mapping.
1032 *
1033 * Folios in this VMA will be aligned to, and at least as large as, the
1034 * number of bytes returned by this function.
1035 *
1036 * Return: The default size of the folios allocated when backing a VMA.
1037 */
1038 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
1039 {
1040 if (vma->vm_ops && vma->vm_ops->pagesize)
1041 return vma->vm_ops->pagesize(vma);
1042 return PAGE_SIZE;
1043 }
1044 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
1045
1046 /*
1047 * Return the page size being used by the MMU to back a VMA. In the majority
1048 * of cases, the page size used by the kernel matches the MMU size. On
1049 * architectures where it differs, an architecture-specific 'strong'
1050 * version of this symbol is required.
1051 */
1052 __weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
1053 {
1054 return vma_kernel_pagesize(vma);
1055 }
1056
1057 /*
1058 * Flags for MAP_PRIVATE reservations. These are stored in the bottom
1059 * bits of the reservation map pointer, which are always clear due to
1060 * alignment.
1061 */
1062 #define HPAGE_RESV_OWNER (1UL << 0)
1063 #define HPAGE_RESV_UNMAPPED (1UL << 1)
1064 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
1065
1066 /*
1067 * These helpers are used to track how many pages are reserved for
1068 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
1069 * is guaranteed to have their future faults succeed.
1070 *
1071 * With the exception of hugetlb_dup_vma_private() which is called at fork(),
1072 * the reserve counters are updated with the hugetlb_lock held. It is safe
1073 * to reset the VMA at fork() time as it is not in use yet and there is no
1074 * chance of the global counters getting corrupted as a result of the values.
1075 *
1076 * The private mapping reservation is represented in a subtly different
1077 * manner to a shared mapping. A shared mapping has a region map associated
1078 * with the underlying file; this region map represents the backing file
1079 * pages which have ever had a reservation assigned, and this persists even
1080 * after the page is instantiated. A private mapping has a region map
1081 * associated with the original mmap which is attached to all VMAs which
1082 * reference it; this region map represents those offsets which have consumed
1083 * a reservation, i.e. where pages have been instantiated.
1084 */
1085 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
1086 {
1087 return (unsigned long)vma->vm_private_data;
1088 }
1089
1090 static void set_vma_private_data(struct vm_area_struct *vma,
1091 unsigned long value)
1092 {
1093 vma->vm_private_data = (void *)value;
1094 }
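/*
 * Editor's note: for a MAP_PRIVATE mapping, vm_private_data packs a
 * kmalloc()'d (hence suitably aligned) struct resv_map pointer together with
 * the HPAGE_RESV_* flags in its low bits. vma_resv_map() recovers the pointer
 * with get_vma_private_data(vma) & ~HPAGE_RESV_MASK, and is_vma_resv_set()
 * tests an individual flag bit.
 */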
1095
1096 static void
1097 resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
1098 struct hugetlb_cgroup *h_cg,
1099 struct hstate *h)
1100 {
1101 #ifdef CONFIG_CGROUP_HUGETLB
1102 if (!h_cg || !h) {
1103 resv_map->reservation_counter = NULL;
1104 resv_map->pages_per_hpage = 0;
1105 resv_map->css = NULL;
1106 } else {
1107 resv_map->reservation_counter =
1108 &h_cg->rsvd_hugepage[hstate_index(h)];
1109 resv_map->pages_per_hpage = pages_per_huge_page(h);
1110 resv_map->css = &h_cg->css;
1111 }
1112 #endif
1113 }
1114
1115 struct resv_map *resv_map_alloc(void)
1116 {
1117 struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
1118 struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
1119
1120 if (!resv_map || !rg) {
1121 kfree(resv_map);
1122 kfree(rg);
1123 return NULL;
1124 }
1125
1126 kref_init(&resv_map->refs);
1127 spin_lock_init(&resv_map->lock);
1128 INIT_LIST_HEAD(&resv_map->regions);
1129 init_rwsem(&resv_map->rw_sema);
1130
1131 resv_map->adds_in_progress = 0;
1132 /*
1133 * Initialize these to 0. On shared mappings, 0's here indicate these
1134 * fields don't do cgroup accounting. On private mappings, these will be
1135 * re-initialized to the proper values, to indicate that hugetlb cgroup
1136 * reservations are to be un-charged from here.
1137 */
1138 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);
1139
1140 INIT_LIST_HEAD(&resv_map->region_cache);
1141 list_add(&rg->link, &resv_map->region_cache);
1142 resv_map->region_cache_count = 1;
1143
1144 return resv_map;
1145 }
1146
1147 void resv_map_release(struct kref *ref)
1148 {
1149 struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
1150 struct list_head *head = &resv_map->region_cache;
1151 struct file_region *rg, *trg;
1152
1153 /* Clear out any active regions before we release the map. */
1154 region_del(resv_map, 0, LONG_MAX);
1155
1156 /* ... and any entries left in the cache */
1157 list_for_each_entry_safe(rg, trg, head, link) {
1158 list_del(&rg->link);
1159 kfree(rg);
1160 }
1161
1162 VM_BUG_ON(resv_map->adds_in_progress);
1163
1164 kfree(resv_map);
1165 }
1166
1167 static inline struct resv_map *inode_resv_map(struct inode *inode)
1168 {
1169 /*
1170 * At inode evict time, i_mapping may not point to the original
1171 * address space within the inode. This original address space
1172 * contains the pointer to the resv_map. So, always use the
1173 * address space embedded within the inode.
1174 * The VERY common case is inode->mapping == &inode->i_data, but
1175 * this may not be true for device special inodes.
1176 */
1177 return (struct resv_map *)(&inode->i_data)->i_private_data;
1178 }
1179
1180 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
1181 {
1182 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1183 if (vma->vm_flags & VM_MAYSHARE) {
1184 struct address_space *mapping = vma->vm_file->f_mapping;
1185 struct inode *inode = mapping->host;
1186
1187 return inode_resv_map(inode);
1188
1189 } else {
1190 return (struct resv_map *)(get_vma_private_data(vma) &
1191 ~HPAGE_RESV_MASK);
1192 }
1193 }
1194
1195 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
1196 {
1197 VM_WARN_ON_ONCE_VMA(!is_vm_hugetlb_page(vma), vma);
1198 VM_WARN_ON_ONCE_VMA(vma->vm_flags & VM_MAYSHARE, vma);
1199
1200 set_vma_private_data(vma, get_vma_private_data(vma) | flags);
1201 }
1202
1203 static void set_vma_desc_resv_map(struct vm_area_desc *desc, struct resv_map *map)
1204 {
1205 VM_WARN_ON_ONCE(!is_vm_hugetlb_flags(desc->vm_flags));
1206 VM_WARN_ON_ONCE(desc->vm_flags & VM_MAYSHARE);
1207
1208 desc->private_data = map;
1209 }
1210
1211 static void set_vma_desc_resv_flags(struct vm_area_desc *desc, unsigned long flags)
1212 {
1213 VM_WARN_ON_ONCE(!is_vm_hugetlb_flags(desc->vm_flags));
1214 VM_WARN_ON_ONCE(desc->vm_flags & VM_MAYSHARE);
1215
1216 desc->private_data = (void *)((unsigned long)desc->private_data | flags);
1217 }
1218
1219 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
1220 {
1221 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1222
1223 return (get_vma_private_data(vma) & flag) != 0;
1224 }
1225
1226 static bool is_vma_desc_resv_set(struct vm_area_desc *desc, unsigned long flag)
1227 {
1228 VM_WARN_ON_ONCE(!is_vm_hugetlb_flags(desc->vm_flags));
1229
1230 return ((unsigned long)desc->private_data) & flag;
1231 }
1232
1233 bool __vma_private_lock(struct vm_area_struct *vma)
1234 {
1235 return !(vma->vm_flags & VM_MAYSHARE) &&
1236 get_vma_private_data(vma) & ~HPAGE_RESV_MASK &&
1237 is_vma_resv_set(vma, HPAGE_RESV_OWNER);
1238 }
1239
1240 void hugetlb_dup_vma_private(struct vm_area_struct *vma)
1241 {
1242 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1243 /*
1244 * Clear vm_private_data
1245 * - For shared mappings this is a per-vma semaphore that may be
1246 * allocated in a subsequent call to hugetlb_vm_op_open.
1247 * Before clearing, make sure pointer is not associated with vma
1248 * as this will leak the structure. This is the case when called
1249 * via clear_vma_resv_huge_pages() and hugetlb_vm_op_open has already
1250 * been called to allocate a new structure.
1251 * - For MAP_PRIVATE mappings, this is the reserve map which does
1252 * not apply to children. Faults generated by the children are
1253 * not guaranteed to succeed, even if read-only.
1254 */
1255 if (vma->vm_flags & VM_MAYSHARE) {
1256 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
1257
1258 if (vma_lock && vma_lock->vma != vma)
1259 vma->vm_private_data = NULL;
1260 } else
1261 vma->vm_private_data = NULL;
1262 }
1263
1264 /*
1265 * Reset and decrement one ref on hugepage private reservation.
1266 * Called with mm->mmap_lock writer semaphore held.
1267 * This function should only be used by mremap and operate on a
1268 * same-sized vma. It should never be called with the last ref on the
1269 * reservation.
1270 */
1271 void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
1272 {
1273 /*
1274 * Clear the old hugetlb private page reservation.
1275 * It has already been transferred to new_vma.
1276 *
1277 * During a mremap() operation of a hugetlb vma we call move_vma()
1278 * which copies vma into new_vma and unmaps vma. After the copy
1279 * operation both new_vma and vma share a reference to the resv_map
1280 * struct, and at that point vma is about to be unmapped. We don't
1281 * want to return the reservation to the pool at unmap of vma because
1282 * the reservation still lives on in new_vma, so simply decrement the
1283 * ref here and remove the resv_map reference from this vma.
1284 */
1285 struct resv_map *reservations = vma_resv_map(vma);
1286
1287 if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1288 resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
1289 kref_put(&reservations->refs, resv_map_release);
1290 }
1291
1292 hugetlb_dup_vma_private(vma);
1293 }
1294
1295 static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
1296 {
1297 int nid = folio_nid(folio);
1298
1299 lockdep_assert_held(&hugetlb_lock);
1300 VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
1301
1302 list_move(&folio->lru, &h->hugepage_freelists[nid]);
1303 h->free_huge_pages++;
1304 h->free_huge_pages_node[nid]++;
1305 folio_set_hugetlb_freed(folio);
1306 }
1307
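/*
 * Pop a free folio from the given node's free list, skipping folios that a
 * PF_MEMALLOC_PIN caller could not long-term pin, hwpoisoned folios, and
 * folios sitting in an isolated pageblock. The chosen folio is moved to the
 * active list with its refcount raised to one.
 */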
1308 static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h,
1309 int nid)
1310 {
1311 struct folio *folio;
1312 bool pin = !!(current->flags & PF_MEMALLOC_PIN);
1313
1314 lockdep_assert_held(&hugetlb_lock);
1315 list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) {
1316 if (pin && !folio_is_longterm_pinnable(folio))
1317 continue;
1318
1319 if (folio_test_hwpoison(folio))
1320 continue;
1321
1322 if (is_migrate_isolate_page(&folio->page))
1323 continue;
1324
1325 list_move(&folio->lru, &h->hugepage_activelist);
1326 folio_ref_unfreeze(folio, 1);
1327 folio_clear_hugetlb_freed(folio);
1328 h->free_huge_pages--;
1329 h->free_huge_pages_node[nid]--;
1330 return folio;
1331 }
1332
1333 return NULL;
1334 }
1335
1336 static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask,
1337 int nid, nodemask_t *nmask)
1338 {
1339 unsigned int cpuset_mems_cookie;
1340 struct zonelist *zonelist;
1341 struct zone *zone;
1342 struct zoneref *z;
1343 int node = NUMA_NO_NODE;
1344
1345 /* 'nid' should not be NUMA_NO_NODE. Try to catch any misuse of it and rectify. */
1346 if (nid == NUMA_NO_NODE)
1347 nid = numa_node_id();
1348
1349 zonelist = node_zonelist(nid, gfp_mask);
1350
1351 retry_cpuset:
1352 cpuset_mems_cookie = read_mems_allowed_begin();
1353 for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
1354 struct folio *folio;
1355
1356 if (!cpuset_zone_allowed(zone, gfp_mask))
1357 continue;
1358 /*
1359 * no need to ask again on the same node. Pool is node rather than
1360 * zone aware
1361 */
1362 if (zone_to_nid(zone) == node)
1363 continue;
1364 node = zone_to_nid(zone);
1365
1366 folio = dequeue_hugetlb_folio_node_exact(h, node);
1367 if (folio)
1368 return folio;
1369 }
1370 if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
1371 goto retry_cpuset;
1372
1373 return NULL;
1374 }
1375
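/* Free huge pages that are not already spoken for by reservations. */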
1376 static unsigned long available_huge_pages(struct hstate *h)
1377 {
1378 return h->free_huge_pages - h->resv_huge_pages;
1379 }
1380
1381 static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
1382 struct vm_area_struct *vma,
1383 unsigned long address, long gbl_chg)
1384 {
1385 struct folio *folio = NULL;
1386 struct mempolicy *mpol;
1387 gfp_t gfp_mask;
1388 nodemask_t *nodemask;
1389 int nid;
1390
1391 /*
1392 * gbl_chg==1 means the allocation requires a new page that was not
1393 * reserved before, so make sure there is at least one free page.
1394 */
1395 if (gbl_chg && !available_huge_pages(h))
1396 goto err;
1397
1398 gfp_mask = htlb_alloc_mask(h);
1399 nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1400
1401 if (mpol_is_preferred_many(mpol)) {
1402 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
1403 nid, nodemask);
1404
1405 /* Fallback to all nodes if page==NULL */
1406 nodemask = NULL;
1407 }
1408
1409 if (!folio)
1410 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
1411 nid, nodemask);
1412
1413 mpol_cond_put(mpol);
1414 return folio;
1415
1416 err:
1417 return NULL;
1418 }
1419
1420 #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
1421 #ifdef CONFIG_CONTIG_ALLOC
1422 static struct folio *alloc_gigantic_folio(int order, gfp_t gfp_mask,
1423 int nid, nodemask_t *nodemask)
1424 {
1425 struct folio *folio;
1426 bool retried = false;
1427
1428 retry:
1429 folio = hugetlb_cma_alloc_folio(order, gfp_mask, nid, nodemask);
1430 if (!folio) {
1431 if (hugetlb_cma_exclusive_alloc())
1432 return NULL;
1433
1434 folio = folio_alloc_gigantic(order, gfp_mask, nid, nodemask);
1435 if (!folio)
1436 return NULL;
1437 }
1438
1439 if (folio_ref_freeze(folio, 1))
1440 return folio;
1441
1442 pr_warn("HugeTLB: unexpected refcount on PFN %lu\n", folio_pfn(folio));
1443 hugetlb_free_folio(folio);
1444 if (!retried) {
1445 retried = true;
1446 goto retry;
1447 }
1448 return NULL;
1449 }
1450
1451 #else /* !CONFIG_CONTIG_ALLOC */
1452 static struct folio *alloc_gigantic_folio(int order, gfp_t gfp_mask, int nid,
1453 nodemask_t *nodemask)
1454 {
1455 return NULL;
1456 }
1457 #endif /* CONFIG_CONTIG_ALLOC */
1458
1459 #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
1460 static struct folio *alloc_gigantic_folio(int order, gfp_t gfp_mask, int nid,
1461 nodemask_t *nodemask)
1462 {
1463 return NULL;
1464 }
1465 #endif
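/*
 * Editor's note on alloc_gigantic_folio() above: gigantic folios are taken
 * from the hugetlb CMA area first; only when CMA cannot satisfy the request
 * (and CMA is not configured as the exclusive source) does it fall back to
 * folio_alloc_gigantic(). The single retry covers the unlikely case of a
 * transient extra reference preventing the refcount freeze.
 */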
1466
1467 /*
1468 * Remove hugetlb folio from lists.
1469 * If vmemmap exists for the folio, clear the hugetlb flag so that the
1470 * folio appears as just a compound page. Otherwise, wait until after
1471 * allocating vmemmap to clear the flag.
1472 *
1473 * Must be called with hugetlb lock held.
1474 */
1475 void remove_hugetlb_folio(struct hstate *h, struct folio *folio,
1476 bool adjust_surplus)
1477 {
1478 int nid = folio_nid(folio);
1479
1480 VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio(folio), folio);
1481 VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio_rsvd(folio), folio);
1482
1483 lockdep_assert_held(&hugetlb_lock);
1484 if (hstate_is_gigantic_no_runtime(h))
1485 return;
1486
1487 list_del(&folio->lru);
1488
1489 if (folio_test_hugetlb_freed(folio)) {
1490 folio_clear_hugetlb_freed(folio);
1491 h->free_huge_pages--;
1492 h->free_huge_pages_node[nid]--;
1493 }
1494 if (adjust_surplus) {
1495 h->surplus_huge_pages--;
1496 h->surplus_huge_pages_node[nid]--;
1497 }
1498
1499 /*
1500 * We can only clear the hugetlb flag after allocating vmemmap
1501 * pages. Otherwise, someone (memory error handling) may try to write
1502 * to tail struct pages.
1503 */
1504 if (!folio_test_hugetlb_vmemmap_optimized(folio))
1505 __folio_clear_hugetlb(folio);
1506
1507 h->nr_huge_pages--;
1508 h->nr_huge_pages_node[nid]--;
1509 }
1510
1511 void add_hugetlb_folio(struct hstate *h, struct folio *folio,
1512 bool adjust_surplus)
1513 {
1514 int nid = folio_nid(folio);
1515
1516 VM_BUG_ON_FOLIO(!folio_test_hugetlb_vmemmap_optimized(folio), folio);
1517
1518 lockdep_assert_held(&hugetlb_lock);
1519
1520 INIT_LIST_HEAD(&folio->lru);
1521 h->nr_huge_pages++;
1522 h->nr_huge_pages_node[nid]++;
1523
1524 if (adjust_surplus) {
1525 h->surplus_huge_pages++;
1526 h->surplus_huge_pages_node[nid]++;
1527 }
1528
1529 __folio_set_hugetlb(folio);
1530 folio_change_private(folio, NULL);
1531 /*
1532 * We have to set hugetlb_vmemmap_optimized again as above
1533 * folio_change_private(folio, NULL) cleared it.
1534 */
1535 folio_set_hugetlb_vmemmap_optimized(folio);
1536
1537 arch_clear_hugetlb_flags(folio);
1538 enqueue_hugetlb_folio(h, folio);
1539 }
1540
1541 static void __update_and_free_hugetlb_folio(struct hstate *h,
1542 struct folio *folio)
1543 {
1544 bool clear_flag = folio_test_hugetlb_vmemmap_optimized(folio);
1545
1546 if (hstate_is_gigantic_no_runtime(h))
1547 return;
1548
1549 /*
1550 * If we don't know which subpages are hwpoisoned, we can't free
1551 * the hugepage, so it's leaked intentionally.
1552 */
1553 if (folio_test_hugetlb_raw_hwp_unreliable(folio))
1554 return;
1555
1556 /*
1557 * If folio is not vmemmap optimized (!clear_flag), then the folio
1558 * is no longer identified as a hugetlb page. hugetlb_vmemmap_restore_folio
1559 * can only be passed hugetlb pages and will BUG otherwise.
1560 */
1561 if (clear_flag && hugetlb_vmemmap_restore_folio(h, folio)) {
1562 spin_lock_irq(&hugetlb_lock);
1563 /*
1564 * If we cannot allocate vmemmap pages, just refuse to free the
1565 * page and put the page back on the hugetlb free list and treat
1566 * as a surplus page.
1567 */
1568 add_hugetlb_folio(h, folio, true);
1569 spin_unlock_irq(&hugetlb_lock);
1570 return;
1571 }
1572
1573 /*
1574 * If vmemmap pages were allocated above, then we need to clear the
1575 * hugetlb flag under the hugetlb lock.
1576 */
1577 if (folio_test_hugetlb(folio)) {
1578 spin_lock_irq(&hugetlb_lock);
1579 __folio_clear_hugetlb(folio);
1580 spin_unlock_irq(&hugetlb_lock);
1581 }
1582
1583 /*
1584 * Move PageHWPoison flag from head page to the raw error pages,
1585 * which makes any healthy subpages reusable.
1586 */
1587 if (unlikely(folio_test_hwpoison(folio)))
1588 folio_clear_hugetlb_hwpoison(folio);
1589
1590 folio_ref_unfreeze(folio, 1);
1591
1592 hugetlb_free_folio(folio);
1593 }
1594
1595 /*
1596 * Because update_and_free_hugetlb_folio() can be called from any context, we cannot
1597 * use GFP_KERNEL to allocate vmemmap pages. However, we can defer the
1598 * actual freeing to a workqueue to avoid using GFP_ATOMIC to allocate
1599 * the vmemmap pages.
1600 *
1601 * free_hpage_workfn() locklessly retrieves the linked list of pages to be
1602 * freed and frees them one-by-one. As the page->mapping pointer is going
1603 * to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node
1604 * structure of a lockless linked list of huge pages to be freed.
1605 */
1606 static LLIST_HEAD(hpage_freelist);
1607
1608 static void free_hpage_workfn(struct work_struct *work)
1609 {
1610 struct llist_node *node;
1611
1612 node = llist_del_all(&hpage_freelist);
1613
1614 while (node) {
1615 struct folio *folio;
1616 struct hstate *h;
1617
1618 folio = container_of((struct address_space **)node,
1619 struct folio, mapping);
1620 node = node->next;
1621 folio->mapping = NULL;
1622 /*
1623 * The VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio) in
1624 * folio_hstate() is going to trigger because a previous call to
1625 * remove_hugetlb_folio() will clear the hugetlb bit, so do
1626 * not use folio_hstate() directly.
1627 */
1628 h = size_to_hstate(folio_size(folio));
1629
1630 __update_and_free_hugetlb_folio(h, folio);
1631
1632 cond_resched();
1633 }
1634 }
1635 static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
1636
1637 static inline void flush_free_hpage_work(struct hstate *h)
1638 {
1639 if (hugetlb_vmemmap_optimizable(h))
1640 flush_work(&free_hpage_work);
1641 }
1642
1643 static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
1644 bool atomic)
1645 {
1646 if (!folio_test_hugetlb_vmemmap_optimized(folio) || !atomic) {
1647 __update_and_free_hugetlb_folio(h, folio);
1648 return;
1649 }
1650
1651 /*
1652 * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
1653 *
1654 * Only call schedule_work() if hpage_freelist was previously
1655 * empty. Otherwise, schedule_work() had been called but the workfn
1656 * hasn't retrieved the list yet.
1657 */
1658 if (llist_add((struct llist_node *)&folio->mapping, &hpage_freelist))
1659 schedule_work(&free_hpage_work);
1660 }
1661
1662 static void bulk_vmemmap_restore_error(struct hstate *h,
1663 struct list_head *folio_list,
1664 struct list_head *non_hvo_folios)
1665 {
1666 struct folio *folio, *t_folio;
1667
1668 if (!list_empty(non_hvo_folios)) {
1669 /*
1670 * Free any restored hugetlb pages so that restore of the
1671 * entire list can be retried.
1672 * The idea is that in the common case of ENOMEM errors freeing
1673 * hugetlb pages with vmemmap we will free up memory so that we
1674 * can allocate vmemmap for more hugetlb pages.
1675 */
1676 list_for_each_entry_safe(folio, t_folio, non_hvo_folios, lru) {
1677 list_del(&folio->lru);
1678 spin_lock_irq(&hugetlb_lock);
1679 __folio_clear_hugetlb(folio);
1680 spin_unlock_irq(&hugetlb_lock);
1681 update_and_free_hugetlb_folio(h, folio, false);
1682 cond_resched();
1683 }
1684 } else {
1685 /*
1686 * In the case where there are no folios which can be
1687 * immediately freed, we loop through the list trying to restore
1688 * vmemmap individually in the hope that someone elsewhere may
1689 * have done something to cause success (such as freeing some
1690 * memory). If unable to restore a hugetlb page, the hugetlb
1691 * page is made a surplus page and removed from the list.
1692 * If we are able to restore vmemmap for, and free, one hugetlb page, we
1693 * quit processing the list to retry the bulk operation.
1694 */
1695 list_for_each_entry_safe(folio, t_folio, folio_list, lru)
1696 if (hugetlb_vmemmap_restore_folio(h, folio)) {
1697 list_del(&folio->lru);
1698 spin_lock_irq(&hugetlb_lock);
1699 add_hugetlb_folio(h, folio, true);
1700 spin_unlock_irq(&hugetlb_lock);
1701 } else {
1702 list_del(&folio->lru);
1703 spin_lock_irq(&hugetlb_lock);
1704 __folio_clear_hugetlb(folio);
1705 spin_unlock_irq(&hugetlb_lock);
1706 update_and_free_hugetlb_folio(h, folio, false);
1707 cond_resched();
1708 break;
1709 }
1710 }
1711 }
1712
1713 static void update_and_free_pages_bulk(struct hstate *h,
1714 struct list_head *folio_list)
1715 {
1716 long ret;
1717 struct folio *folio, *t_folio;
1718 LIST_HEAD(non_hvo_folios);
1719
1720 /*
1721 * First allocate the required vmemmap (if necessary) for all folios.
1722 * Carefully handle errors and free up any available hugetlb pages
1723 * in an effort to make forward progress.
1724 */
1725 retry:
1726 ret = hugetlb_vmemmap_restore_folios(h, folio_list, &non_hvo_folios);
1727 if (ret < 0) {
1728 bulk_vmemmap_restore_error(h, folio_list, &non_hvo_folios);
1729 goto retry;
1730 }
1731
1732 /*
1733 * At this point, folio_list should be empty, ret should be >= 0 and there
1734 * should only be pages on the non_hvo_folios list.
1735 * Do note that the non_hvo_folios list could be empty.
1736 * Without HVO enabled, ret will be 0 and there is no need to call
1737 * __folio_clear_hugetlb() as this was done previously.
1738 */
1739 VM_WARN_ON(!list_empty(folio_list));
1740 VM_WARN_ON(ret < 0);
1741 if (!list_empty(&non_hvo_folios) && ret) {
1742 spin_lock_irq(&hugetlb_lock);
1743 list_for_each_entry(folio, &non_hvo_folios, lru)
1744 __folio_clear_hugetlb(folio);
1745 spin_unlock_irq(&hugetlb_lock);
1746 }
1747
1748 list_for_each_entry_safe(folio, t_folio, &non_hvo_folios, lru) {
1749 update_and_free_hugetlb_folio(h, folio, false);
1750 cond_resched();
1751 }
1752 }
1753
1754 struct hstate *size_to_hstate(unsigned long size)
1755 {
1756 struct hstate *h;
1757
1758 for_each_hstate(h) {
1759 if (huge_page_size(h) == size)
1760 return h;
1761 }
1762 return NULL;
1763 }
1764
1765 void free_huge_folio(struct folio *folio)
1766 {
1767 /*
1768 * Can't pass hstate in here because it is called from the
1769 * generic mm code.
1770 */
1771 struct hstate *h = folio_hstate(folio);
1772 int nid = folio_nid(folio);
1773 struct hugepage_subpool *spool = hugetlb_folio_subpool(folio);
1774 bool restore_reserve;
1775 unsigned long flags;
1776
1777 VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
1778 VM_BUG_ON_FOLIO(folio_mapcount(folio), folio);
1779
1780 hugetlb_set_folio_subpool(folio, NULL);
1781 if (folio_test_anon(folio))
1782 __ClearPageAnonExclusive(&folio->page);
1783 folio->mapping = NULL;
1784 restore_reserve = folio_test_hugetlb_restore_reserve(folio);
1785 folio_clear_hugetlb_restore_reserve(folio);
1786
1787 /*
1788 * If HPageRestoreReserve was set on page, page allocation consumed a
1789 * reservation. If the page was associated with a subpool, there
1790 * would have been a page reserved in the subpool before allocation
1791 * via hugepage_subpool_get_pages(). Since we are 'restoring' the
1792 * reservation, do not call hugepage_subpool_put_pages() as this will
1793 * remove the reserved page from the subpool.
1794 */
1795 if (!restore_reserve) {
1796 /*
1797 * A return code of zero implies that the subpool will be
1798 * under its minimum size if the reservation is not restored
1799 * after the page is freed. Therefore, force the restore_reserve
1800 * operation.
1801 */
1802 if (hugepage_subpool_put_pages(spool, 1) == 0)
1803 restore_reserve = true;
1804 }
1805
1806 spin_lock_irqsave(&hugetlb_lock, flags);
1807 folio_clear_hugetlb_migratable(folio);
1808 hugetlb_cgroup_uncharge_folio(hstate_index(h),
1809 pages_per_huge_page(h), folio);
1810 hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
1811 pages_per_huge_page(h), folio);
1812 lruvec_stat_mod_folio(folio, NR_HUGETLB, -pages_per_huge_page(h));
1813 mem_cgroup_uncharge(folio);
1814 if (restore_reserve)
1815 h->resv_huge_pages++;
1816
1817 if (folio_test_hugetlb_temporary(folio)) {
1818 remove_hugetlb_folio(h, folio, false);
1819 spin_unlock_irqrestore(&hugetlb_lock, flags);
1820 update_and_free_hugetlb_folio(h, folio, true);
1821 } else if (h->surplus_huge_pages_node[nid]) {
1822 /* remove the page from active list */
1823 remove_hugetlb_folio(h, folio, true);
1824 spin_unlock_irqrestore(&hugetlb_lock, flags);
1825 update_and_free_hugetlb_folio(h, folio, true);
1826 } else {
1827 arch_clear_hugetlb_flags(folio);
1828 enqueue_hugetlb_folio(h, folio);
1829 spin_unlock_irqrestore(&hugetlb_lock, flags);
1830 }
1831 }
1832
1833 /*
1834 * Must be called with the hugetlb lock held
1835 */
1836 static void account_new_hugetlb_folio(struct hstate *h, struct folio *folio)
1837 {
1838 lockdep_assert_held(&hugetlb_lock);
1839 h->nr_huge_pages++;
1840 h->nr_huge_pages_node[folio_nid(folio)]++;
1841 }
1842
1843 void init_new_hugetlb_folio(struct folio *folio)
1844 {
1845 __folio_set_hugetlb(folio);
1846 INIT_LIST_HEAD(&folio->lru);
1847 hugetlb_set_folio_subpool(folio, NULL);
1848 set_hugetlb_cgroup(folio, NULL);
1849 set_hugetlb_cgroup_rsvd(folio, NULL);
1850 }
1851
1852 /*
1853 * Find and lock address space (mapping) in write mode.
1854 *
1855 * Upon entry, the folio is locked, which means that folio_mapping() is
1856 * stable. Due to the locking order, we can only trylock the write lock.
1857 * If we cannot get the lock, simply return NULL to the caller.
1858 */
1859 struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio)
1860 {
1861 struct address_space *mapping = folio_mapping(folio);
1862
1863 if (!mapping)
1864 return mapping;
1865
1866 if (i_mmap_trylock_write(mapping))
1867 return mapping;
1868
1869 return NULL;
1870 }
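/*
 * Minimal caller sketch (illustrative only): the mapping, if any, is
 * returned write-locked and must be unlocked by the caller:
 *
 *	struct address_space *mapping;
 *
 *	mapping = hugetlb_folio_mapping_lock_write(folio);
 *	if (mapping) {
 *		... walk/modify the i_mmap tree ...
 *		i_mmap_unlock_write(mapping);
 *	}
 */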
1871
1872 static struct folio *alloc_buddy_hugetlb_folio(int order, gfp_t gfp_mask,
1873 int nid, nodemask_t *nmask, nodemask_t *node_alloc_noretry)
1874 {
1875 struct folio *folio;
1876 bool alloc_try_hard = true;
1877
1878 /*
1879 * By default we always try hard to allocate the folio with
1880 * __GFP_RETRY_MAYFAIL flag. However, if we are allocating folios in
1881 * a loop (to adjust global huge page counts) and previous allocation
1882 * failed, do not continue to try hard on the same node. Use the
1883 * node_alloc_noretry bitmap to manage this state information.
1884 */
1885 if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
1886 alloc_try_hard = false;
1887 if (alloc_try_hard)
1888 gfp_mask |= __GFP_RETRY_MAYFAIL;
1889
1890 folio = (struct folio *)__alloc_frozen_pages(gfp_mask, order, nid, nmask);
1891
1892 /*
1893 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a
1894 * folio, this indicates an overall state change. Clear the bit so
1895 * that we resume normal 'try hard' allocations.
1896 */
1897 if (node_alloc_noretry && folio && !alloc_try_hard)
1898 node_clear(nid, *node_alloc_noretry);
1899
1900 /*
1901 * If we tried hard to get a folio but failed, set bit so that
1902 * subsequent attempts will not try as hard until there is an
1903 * overall state change.
1904 */
1905 if (node_alloc_noretry && !folio && alloc_try_hard)
1906 node_set(nid, *node_alloc_noretry);
1907
1908 if (!folio) {
1909 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1910 return NULL;
1911 }
1912
1913 __count_vm_event(HTLB_BUDDY_PGALLOC);
1914 return folio;
1915 }
1916
1917 static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h,
1918 gfp_t gfp_mask, int nid, nodemask_t *nmask,
1919 nodemask_t *node_alloc_noretry)
1920 {
1921 struct folio *folio;
1922 int order = huge_page_order(h);
1923
1924 if (nid == NUMA_NO_NODE)
1925 nid = numa_mem_id();
1926
1927 if (order_is_gigantic(order))
1928 folio = alloc_gigantic_folio(order, gfp_mask, nid, nmask);
1929 else
1930 folio = alloc_buddy_hugetlb_folio(order, gfp_mask, nid, nmask,
1931 node_alloc_noretry);
1932 if (folio)
1933 init_new_hugetlb_folio(folio);
1934 return folio;
1935 }
1936
1937 /*
1938 * Common helper to allocate a fresh hugetlb folio. All specific allocators
1939 * should use this function to get a new hugetlb folio.
1940 *
1941 * Note that the returned folio is 'frozen': the ref count of the head page
1942 * and all tail pages is zero, and the accounting must be done by the caller.
1943 */
1944 static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h,
1945 gfp_t gfp_mask, int nid, nodemask_t *nmask)
1946 {
1947 struct folio *folio;
1948
1949 folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
1950 if (folio)
1951 hugetlb_vmemmap_optimize_folio(h, folio);
1952 return folio;
1953 }
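/*
 * One possible caller pattern (illustrative sketch, mirroring
 * alloc_migrate_hugetlb_folio() below): because the fresh folio is
 * frozen, the caller accounts it and then unfreezes the refcount:
 *
 *	folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask);
 *	if (folio) {
 *		spin_lock_irq(&hugetlb_lock);
 *		account_new_hugetlb_folio(h, folio);
 *		spin_unlock_irq(&hugetlb_lock);
 *		folio_ref_unfreeze(folio, 1);
 *	}
 */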
1954
1955 void prep_and_add_allocated_folios(struct hstate *h,
1956 struct list_head *folio_list)
1957 {
1958 unsigned long flags;
1959 struct folio *folio, *tmp_f;
1960
1961 /* Send list for bulk vmemmap optimization processing */
1962 hugetlb_vmemmap_optimize_folios(h, folio_list);
1963
1964 /* Add all new pool pages to free lists in one lock cycle */
1965 spin_lock_irqsave(&hugetlb_lock, flags);
1966 list_for_each_entry_safe(folio, tmp_f, folio_list, lru) {
1967 account_new_hugetlb_folio(h, folio);
1968 enqueue_hugetlb_folio(h, folio);
1969 }
1970 spin_unlock_irqrestore(&hugetlb_lock, flags);
1971 }
1972
1973 /*
1974 * Allocates a fresh hugetlb page in a node interleaved manner. The page
1975 * will later be added to the appropriate hugetlb pool.
1976 */
1977 static struct folio *alloc_pool_huge_folio(struct hstate *h,
1978 nodemask_t *nodes_allowed,
1979 nodemask_t *node_alloc_noretry,
1980 int *next_node)
1981 {
1982 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
1983 int nr_nodes, node;
1984
1985 for_each_node_mask_to_alloc(next_node, nr_nodes, node, nodes_allowed) {
1986 struct folio *folio;
1987
1988 folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, node,
1989 nodes_allowed, node_alloc_noretry);
1990 if (folio)
1991 return folio;
1992 }
1993
1994 return NULL;
1995 }
1996
1997 /*
1998 * Remove a huge page from the pool, starting from the next node to free
1999 * from, so that persistent huge pages stay more or less balanced over the
2000 * allowed nodes. This routine only 'removes' the hugetlb page. The caller
2001 * must make an additional call to free the page to the low level allocators.
2002 * Called with hugetlb_lock locked.
2003 */
2004 static struct folio *remove_pool_hugetlb_folio(struct hstate *h,
2005 nodemask_t *nodes_allowed, bool acct_surplus)
2006 {
2007 int nr_nodes, node;
2008 struct folio *folio = NULL;
2009
2010 lockdep_assert_held(&hugetlb_lock);
2011 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2012 /*
2013 * If we're returning unused surplus pages, only examine
2014 * nodes with surplus pages.
2015 */
2016 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
2017 !list_empty(&h->hugepage_freelists[node])) {
2018 folio = list_entry(h->hugepage_freelists[node].next,
2019 struct folio, lru);
2020 remove_hugetlb_folio(h, folio, acct_surplus);
2021 break;
2022 }
2023 }
2024
2025 return folio;
2026 }
2027
2028 /*
2029 * Dissolve a given free hugetlb folio into free buddy pages. This function
2030 * does nothing for in-use hugetlb folios and non-hugetlb folios.
2031 * This function returns one of the following values:
2032 *
2033 * -ENOMEM: failed to allocate the vmemmap pages needed to free the hugepage
2034 * when the system is under memory pressure and the feature of
2035 * freeing unused vmemmap pages associated with each hugetlb page
2036 * is enabled.
2037 * -EBUSY: failed to dissolve the free hugepage, or the hugepage is in use
2038 * (allocated or reserved).
2039 * 0: successfully dissolved the free hugepage, or the page is not a
2040 * hugepage (considered as already dissolved).
2041 */
2042 int dissolve_free_hugetlb_folio(struct folio *folio)
2043 {
2044 int rc = -EBUSY;
2045
2046 retry:
2047 /* Avoid disrupting the normal path by needlessly holding hugetlb_lock */
2048 if (!folio_test_hugetlb(folio))
2049 return 0;
2050
2051 spin_lock_irq(&hugetlb_lock);
2052 if (!folio_test_hugetlb(folio)) {
2053 rc = 0;
2054 goto out;
2055 }
2056
2057 if (!folio_ref_count(folio)) {
2058 struct hstate *h = folio_hstate(folio);
2059 bool adjust_surplus = false;
2060
2061 if (!available_huge_pages(h))
2062 goto out;
2063
2064 /*
2065 * We should make sure that the page is already on the free list
2066 * when it is dissolved.
2067 */
2068 if (unlikely(!folio_test_hugetlb_freed(folio))) {
2069 spin_unlock_irq(&hugetlb_lock);
2070 cond_resched();
2071
2072 /*
2073 * Theoretically, we should return -EBUSY when we
2074 * encounter this race. In fact, because the race
2075 * window is quite small, we have a good chance of
2076 * dissolving the page successfully if we retry,
2077 * which increases the success rate of dissolving
2078 * the page.
2079 */
2080 goto retry;
2081 }
2082
2083 if (h->surplus_huge_pages_node[folio_nid(folio)])
2084 adjust_surplus = true;
2085 remove_hugetlb_folio(h, folio, adjust_surplus);
2086 h->max_huge_pages--;
2087 spin_unlock_irq(&hugetlb_lock);
2088
2089 /*
2090 * Normally update_and_free_hugetlb_folio will allocate the required vmemmap
2091 * before freeing the page. update_and_free_hugetlb_folio will fail to
2092 * free the page if it cannot allocate the required vmemmap. We
2093 * need to adjust max_huge_pages if the page is not freed.
2094 * Attempt to allocate the vmemmap here so that we can take
2095 * appropriate action on failure.
2096 *
2097 * The folio_test_hugetlb check here is because
2098 * remove_hugetlb_folio will clear hugetlb folio flag for
2099 * non-vmemmap optimized hugetlb folios.
2100 */
2101 if (folio_test_hugetlb(folio)) {
2102 rc = hugetlb_vmemmap_restore_folio(h, folio);
2103 if (rc) {
2104 spin_lock_irq(&hugetlb_lock);
2105 add_hugetlb_folio(h, folio, adjust_surplus);
2106 h->max_huge_pages++;
2107 goto out;
2108 }
2109 } else
2110 rc = 0;
2111
2112 update_and_free_hugetlb_folio(h, folio, false);
2113 return rc;
2114 }
2115 out:
2116 spin_unlock_irq(&hugetlb_lock);
2117 return rc;
2118 }
2119
2120 /*
2121 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
2122 * make specified memory blocks removable from the system.
2123 * Note that this will dissolve a free gigantic hugepage completely, if any
2124 * part of it lies within the given range.
2125 * Also note that if dissolve_free_hugetlb_folio() returns with an error, all
2126 * free hugetlb folios that were dissolved before that error are lost.
2127 */
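/*
 * Worked example of the pfn stride used below: with 2 MB (order 9) and
 * 1 GB (order 18) hstates configured on a 4 KB base page system, the
 * minimum order is 9, so one folio is inspected every 512 pfns; a free
 * 1 GB folio hit by any of those probes is dissolved as a whole.
 */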
2128 int dissolve_free_hugetlb_folios(unsigned long start_pfn, unsigned long end_pfn)
2129 {
2130 unsigned long pfn;
2131 struct folio *folio;
2132 int rc = 0;
2133 unsigned int order;
2134 struct hstate *h;
2135
2136 if (!hugepages_supported())
2137 return rc;
2138
2139 order = huge_page_order(&default_hstate);
2140 for_each_hstate(h)
2141 order = min(order, huge_page_order(h));
2142
2143 for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
2144 folio = pfn_folio(pfn);
2145 rc = dissolve_free_hugetlb_folio(folio);
2146 if (rc)
2147 break;
2148 }
2149
2150 return rc;
2151 }
2152
2153 /*
2154 * Allocates a fresh surplus page from the page allocator.
2155 */
2156 static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
2157 gfp_t gfp_mask, int nid, nodemask_t *nmask)
2158 {
2159 struct folio *folio = NULL;
2160
2161 if (hstate_is_gigantic_no_runtime(h))
2162 return NULL;
2163
2164 spin_lock_irq(&hugetlb_lock);
2165 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
2166 goto out_unlock;
2167 spin_unlock_irq(&hugetlb_lock);
2168
2169 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask);
2170 if (!folio)
2171 return NULL;
2172
2173 spin_lock_irq(&hugetlb_lock);
2174 /*
2175 * nr_huge_pages needs to be adjusted within the same lock cycle
2176 * as surplus_pages, otherwise it might confuse
2177 * persistent_huge_pages() momentarily.
2178 */
2179 account_new_hugetlb_folio(h, folio);
2180
2181 /*
2182 * We could have raced with a pool size change.
2183 * Double check that and simply deallocate the new page
2184 * if we would end up overcommitting the surpluses. Abuse
2185 * the temporary page flag to work around the nasty
2186 * free_huge_folio code flow.
2187 */
2188 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
2189 folio_set_hugetlb_temporary(folio);
2190 spin_unlock_irq(&hugetlb_lock);
2191 free_huge_folio(folio);
2192 return NULL;
2193 }
2194
2195 h->surplus_huge_pages++;
2196 h->surplus_huge_pages_node[folio_nid(folio)]++;
2197
2198 out_unlock:
2199 spin_unlock_irq(&hugetlb_lock);
2200
2201 return folio;
2202 }
2203
2204 static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask,
2205 int nid, nodemask_t *nmask)
2206 {
2207 struct folio *folio;
2208
2209 if (hstate_is_gigantic(h))
2210 return NULL;
2211
2212 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask);
2213 if (!folio)
2214 return NULL;
2215
2216 spin_lock_irq(&hugetlb_lock);
2217 account_new_hugetlb_folio(h, folio);
2218 spin_unlock_irq(&hugetlb_lock);
2219
2220 /* fresh huge pages are frozen */
2221 folio_ref_unfreeze(folio, 1);
2222 /*
2223 * We do not account these pages as surplus because they are only
2224 * temporary and will be released properly on the last reference.
2225 */
2226 folio_set_hugetlb_temporary(folio);
2227
2228 return folio;
2229 }
2230
2231 /*
2232 * Use the VMA's mpolicy to allocate a huge page from the buddy.
2233 */
2234 static
2235 struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
2236 struct vm_area_struct *vma, unsigned long addr)
2237 {
2238 struct folio *folio = NULL;
2239 struct mempolicy *mpol;
2240 gfp_t gfp_mask = htlb_alloc_mask(h);
2241 int nid;
2242 nodemask_t *nodemask;
2243
2244 nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
2245 if (mpol_is_preferred_many(mpol)) {
2246 gfp_t gfp = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2247
2248 folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask);
2249
2250 /* Fall back to all nodes if folio == NULL */
2251 nodemask = NULL;
2252 }
2253
2254 if (!folio)
2255 folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask);
2256 mpol_cond_put(mpol);
2257 return folio;
2258 }
2259
2260 struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
2261 nodemask_t *nmask, gfp_t gfp_mask)
2262 {
2263 struct folio *folio;
2264
2265 spin_lock_irq(&hugetlb_lock);
2266 if (!h->resv_huge_pages) {
2267 spin_unlock_irq(&hugetlb_lock);
2268 return NULL;
2269 }
2270
2271 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, preferred_nid,
2272 nmask);
2273 if (folio)
2274 h->resv_huge_pages--;
2275
2276 spin_unlock_irq(&hugetlb_lock);
2277 return folio;
2278 }
2279
2280 /* folio migration callback function */
2281 struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
2282 nodemask_t *nmask, gfp_t gfp_mask, bool allow_alloc_fallback)
2283 {
2284 spin_lock_irq(&hugetlb_lock);
2285 if (available_huge_pages(h)) {
2286 struct folio *folio;
2287
2288 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
2289 preferred_nid, nmask);
2290 if (folio) {
2291 spin_unlock_irq(&hugetlb_lock);
2292 return folio;
2293 }
2294 }
2295 spin_unlock_irq(&hugetlb_lock);
2296
2297 /* We cannot fall back to other nodes, as we could break the per-node pool. */
2298 if (!allow_alloc_fallback)
2299 gfp_mask |= __GFP_THISNODE;
2300
2301 return alloc_migrate_hugetlb_folio(h, gfp_mask, preferred_nid, nmask);
2302 }
2303
2304 static nodemask_t *policy_mbind_nodemask(gfp_t gfp)
2305 {
2306 #ifdef CONFIG_NUMA
2307 struct mempolicy *mpol = get_task_policy(current);
2308
2309 /*
2310 * Only enforce the MPOL_BIND policy which overlaps with cpuset policy
2311 * (from policy_nodemask), specifically for the hugetlb case.
2312 */
2313 if (mpol->mode == MPOL_BIND &&
2314 (apply_policy_zone(mpol, gfp_zone(gfp)) &&
2315 cpuset_nodemask_valid_mems_allowed(&mpol->nodes)))
2316 return &mpol->nodes;
2317 #endif
2318 return NULL;
2319 }
2320
2321 /*
2322 * Increase the hugetlb pool such that it can accommodate a reservation
2323 * of size 'delta'.
2324 */
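/*
 * Worked example of the arithmetic below (illustrative numbers only):
 * with resv_huge_pages = 10, free_huge_pages = 12 and delta = 5,
 * needed = (10 + 5) - 12 = 3, so three surplus folios are allocated.
 * Had free_huge_pages been 15 or more, needed would be <= 0 and the
 * reservation could be committed without allocating anything.
 */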
2325 static int gather_surplus_pages(struct hstate *h, long delta)
2326 __must_hold(&hugetlb_lock)
2327 {
2328 LIST_HEAD(surplus_list);
2329 struct folio *folio, *tmp;
2330 int ret;
2331 long i;
2332 long needed, allocated;
2333 bool alloc_ok = true;
2334 nodemask_t *mbind_nodemask, alloc_nodemask;
2335
2336 mbind_nodemask = policy_mbind_nodemask(htlb_alloc_mask(h));
2337 if (mbind_nodemask)
2338 nodes_and(alloc_nodemask, *mbind_nodemask, cpuset_current_mems_allowed);
2339 else
2340 alloc_nodemask = cpuset_current_mems_allowed;
2341
2342 lockdep_assert_held(&hugetlb_lock);
2343 needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
2344 if (needed <= 0) {
2345 h->resv_huge_pages += delta;
2346 return 0;
2347 }
2348
2349 allocated = 0;
2350
2351 ret = -ENOMEM;
2352 retry:
2353 spin_unlock_irq(&hugetlb_lock);
2354 for (i = 0; i < needed; i++) {
2355 folio = NULL;
2356
2357 /*
2358 * It is okay to use NUMA_NO_NODE because we use numa_mem_id()
2359 * down the road to pick the current node if that is the case.
2360 */
2361 folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h),
2362 NUMA_NO_NODE, &alloc_nodemask);
2363 if (!folio) {
2364 alloc_ok = false;
2365 break;
2366 }
2367 list_add(&folio->lru, &surplus_list);
2368 cond_resched();
2369 }
2370 allocated += i;
2371
2372 /*
2373 * After retaking hugetlb_lock, we need to recalculate 'needed'
2374 * because either resv_huge_pages or free_huge_pages may have changed.
2375 */
2376 spin_lock_irq(&hugetlb_lock);
2377 needed = (h->resv_huge_pages + delta) -
2378 (h->free_huge_pages + allocated);
2379 if (needed > 0) {
2380 if (alloc_ok)
2381 goto retry;
2382 /*
2383 * We were not able to allocate enough pages to
2384 * satisfy the entire reservation so we free what
2385 * we've allocated so far.
2386 */
2387 goto free;
2388 }
2389 /*
2390 * The surplus_list now contains _at_least_ the number of extra pages
2391 * needed to accommodate the reservation. Add the appropriate number
2392 * of pages to the hugetlb pool and free the extras back to the buddy
2393 * allocator. Commit the entire reservation here to prevent another
2394 * process from stealing the pages as they are added to the pool but
2395 * before they are reserved.
2396 */
2397 needed += allocated;
2398 h->resv_huge_pages += delta;
2399 ret = 0;
2400
2401 /* Free the needed pages to the hugetlb pool */
2402 list_for_each_entry_safe(folio, tmp, &surplus_list, lru) {
2403 if ((--needed) < 0)
2404 break;
2405 /* Add the page to the hugetlb allocator */
2406 enqueue_hugetlb_folio(h, folio);
2407 }
2408 free:
2409 spin_unlock_irq(&hugetlb_lock);
2410
2411 /*
2412 * Free unnecessary surplus pages to the buddy allocator.
2413 * Pages have no ref count, call free_huge_folio directly.
2414 */
2415 list_for_each_entry_safe(folio, tmp, &surplus_list, lru)
2416 free_huge_folio(folio);
2417 spin_lock_irq(&hugetlb_lock);
2418
2419 return ret;
2420 }
2421
2422 /*
2423 * This routine has two main purposes:
2424 * 1) Decrement the reservation count (resv_huge_pages) by the value passed
2425 * in unused_resv_pages. This corresponds to the prior adjustments made
2426 * to the associated reservation map.
2427 * 2) Free any unused surplus pages that may have been allocated to satisfy
2428 * the reservation. As many as unused_resv_pages may be freed.
2429 */
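/*
 * Worked example (illustrative numbers only): if unused_resv_pages = 4
 * but surplus_huge_pages = 2, then nr_pages = min(4, 2) = 2, so the
 * reservation is uncommitted by 4 while at most 2 surplus folios are
 * actually removed from the pool and handed back to the allocator.
 */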
2430 static void return_unused_surplus_pages(struct hstate *h,
2431 unsigned long unused_resv_pages)
2432 {
2433 unsigned long nr_pages;
2434 LIST_HEAD(page_list);
2435
2436 lockdep_assert_held(&hugetlb_lock);
2437 /* Uncommit the reservation */
2438 h->resv_huge_pages -= unused_resv_pages;
2439
2440 if (hstate_is_gigantic_no_runtime(h))
2441 goto out;
2442
2443 /*
2444 * Part (or even all) of the reservation could have been backed
2445 * by pre-allocated pages. Only free surplus pages.
2446 */
2447 nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
2448
2449 /*
2450 * We want to release as many surplus pages as possible, spread
2451 * evenly across all nodes with memory. Iterate across these nodes
2452 * until we can no longer free unreserved surplus pages. This occurs
2453 * when the nodes with surplus pages have no free pages.
2454 * remove_pool_hugetlb_folio() will balance the freed pages across the
2455 * on-line nodes with memory and will handle the hstate accounting.
2456 */
2457 while (nr_pages--) {
2458 struct folio *folio;
2459
2460 folio = remove_pool_hugetlb_folio(h, &node_states[N_MEMORY], true);
2461 if (!folio)
2462 goto out;
2463
2464 list_add(&folio->lru, &page_list);
2465 }
2466
2467 out:
2468 spin_unlock_irq(&hugetlb_lock);
2469 update_and_free_pages_bulk(h, &page_list);
2470 spin_lock_irq(&hugetlb_lock);
2471 }
2472
2473
2474 /*
2475 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
2476 * are used by the huge page allocation routines to manage reservations.
2477 *
2478 * vma_needs_reservation is called to determine if the huge page at addr
2479 * within the vma has an associated reservation. If a reservation is
2480 * needed, the value 1 is returned. The caller is then responsible for
2481 * managing the global reservation and subpool usage counts. After
2482 * the huge page has been allocated, vma_commit_reservation is called
2483 * to add the page to the reservation map. If the page allocation fails,
2484 * the reservation must be ended instead of committed. vma_end_reservation
2485 * is called in such cases.
2486 *
2487 * In the normal case, vma_commit_reservation returns the same value
2488 * as the preceding vma_needs_reservation call. The only time this
2489 * is not the case is if a reserve map was changed between calls. It
2490 * is the responsibility of the caller to notice the difference and
2491 * take appropriate action.
2492 *
2493 * vma_add_reservation is used in error paths where a reservation must
2494 * be restored when a newly allocated huge page must be freed. It is
2495 * to be called after calling vma_needs_reservation to determine if a
2496 * reservation exists.
2497 *
2498 * vma_del_reservation is used in error paths where an entry in the reserve
2499 * map was created during huge page allocation and must be removed. It is to
2500 * be called after calling vma_needs_reservation to determine if a reservation
2501 * exists.
2502 */
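/*
 * Caller-side sketch of the convention described above (illustrative
 * only; see alloc_hugetlb_folio() below for the real thing):
 *
 *	long chg = vma_needs_reservation(h, vma, addr);
 *
 *	if (chg < 0)
 *		return ERR_PTR(-ENOMEM);   // reserve map allocation failed
 *	if (chg)
 *		// no reservation: charge the subpool/global counts
 *	else
 *		// an existing reservation will be consumed
 *	...
 *	// on success: vma_commit_reservation(h, vma, addr);
 *	// on failure: vma_end_reservation(h, vma, addr);
 */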
2503 enum vma_resv_mode {
2504 VMA_NEEDS_RESV,
2505 VMA_COMMIT_RESV,
2506 VMA_END_RESV,
2507 VMA_ADD_RESV,
2508 VMA_DEL_RESV,
2509 };
2510 static long __vma_reservation_common(struct hstate *h,
2511 struct vm_area_struct *vma, unsigned long addr,
2512 enum vma_resv_mode mode)
2513 {
2514 struct resv_map *resv;
2515 pgoff_t idx;
2516 long ret;
2517 long dummy_out_regions_needed;
2518
2519 resv = vma_resv_map(vma);
2520 if (!resv)
2521 return 1;
2522
2523 idx = vma_hugecache_offset(h, vma, addr);
2524 switch (mode) {
2525 case VMA_NEEDS_RESV:
2526 ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed);
2527 /* We assume that vma_reservation_* routines always operate on
2528 * 1 page, and that adding a 1 page entry to the resv map can only
2529 * ever require 1 region.
2530 */
2531 VM_BUG_ON(dummy_out_regions_needed != 1);
2532 break;
2533 case VMA_COMMIT_RESV:
2534 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2535 /* region_add calls of range 1 should never fail. */
2536 VM_BUG_ON(ret < 0);
2537 break;
2538 case VMA_END_RESV:
2539 region_abort(resv, idx, idx + 1, 1);
2540 ret = 0;
2541 break;
2542 case VMA_ADD_RESV:
2543 if (vma->vm_flags & VM_MAYSHARE) {
2544 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2545 /* region_add calls of range 1 should never fail. */
2546 VM_BUG_ON(ret < 0);
2547 } else {
2548 region_abort(resv, idx, idx + 1, 1);
2549 ret = region_del(resv, idx, idx + 1);
2550 }
2551 break;
2552 case VMA_DEL_RESV:
2553 if (vma->vm_flags & VM_MAYSHARE) {
2554 region_abort(resv, idx, idx + 1, 1);
2555 ret = region_del(resv, idx, idx + 1);
2556 } else {
2557 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2558 /* region_add calls of range 1 should never fail. */
2559 VM_BUG_ON(ret < 0);
2560 }
2561 break;
2562 default:
2563 BUG();
2564 }
2565
2566 if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV)
2567 return ret;
2568 /*
2569 * We know a private mapping must have HPAGE_RESV_OWNER set.
2570 *
2571 * In most cases, reserves always exist for private mappings.
2572 * However, a file associated with the mapping could have been
2573 * hole punched or truncated after reserves were consumed; a
2574 * subsequent fault on such a range will not use reserves.
2575 * Subtle - the reserve map for private mappings has the
2576 * opposite meaning from that of shared mappings. If NO
2577 * entry is in the reserve map, it means a reservation exists.
2578 * If an entry exists in the reserve map, it means the
2579 * reservation has already been consumed. As a result, the
2580 * return value of this routine is the opposite of the
2581 * value returned from the reserve map manipulation routines above.
2582 */
2583 if (ret > 0)
2584 return 0;
2585 if (ret == 0)
2586 return 1;
2587 return ret;
2588 }
2589
2590 static long vma_needs_reservation(struct hstate *h,
2591 struct vm_area_struct *vma, unsigned long addr)
2592 {
2593 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
2594 }
2595
2596 static long vma_commit_reservation(struct hstate *h,
2597 struct vm_area_struct *vma, unsigned long addr)
2598 {
2599 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
2600 }
2601
2602 static void vma_end_reservation(struct hstate *h,
2603 struct vm_area_struct *vma, unsigned long addr)
2604 {
2605 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
2606 }
2607
2608 static long vma_add_reservation(struct hstate *h,
2609 struct vm_area_struct *vma, unsigned long addr)
2610 {
2611 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
2612 }
2613
2614 static long vma_del_reservation(struct hstate *h,
2615 struct vm_area_struct *vma, unsigned long addr)
2616 {
2617 return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV);
2618 }
2619
2620 /*
2621 * This routine is called to restore reservation information on error paths.
2622 * It should ONLY be called for folios allocated via alloc_hugetlb_folio(),
2623 * and the hugetlb mutex should remain held when calling this routine.
2624 *
2625 * It handles two specific cases:
2626 * 1) A reservation was in place and the folio consumed the reservation.
2627 * hugetlb_restore_reserve is set in the folio.
2628 * 2) No reservation was in place for the page, so hugetlb_restore_reserve is
2629 * not set. However, alloc_hugetlb_folio always updates the reserve map.
2630 *
2631 * In case 1, free_huge_folio later in the error path will increment the
2632 * global reserve count. But, free_huge_folio does not have enough context
2633 * to adjust the reservation map. This case deals primarily with private
2634 * mappings. Adjust the reserve map here to be consistent with global
2635 * reserve count adjustments to be made by free_huge_folio. Make sure the
2636 * reserve map indicates there is a reservation present.
2637 *
2638 * In case 2, simply undo reserve map modifications done by alloc_hugetlb_folio.
2639 */
2640 void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
2641 unsigned long address, struct folio *folio)
2642 {
2643 long rc = vma_needs_reservation(h, vma, address);
2644
2645 if (folio_test_hugetlb_restore_reserve(folio)) {
2646 if (unlikely(rc < 0))
2647 /*
2648 * Rare out of memory condition in reserve map
2649 * manipulation. Clear hugetlb_restore_reserve so
2650 * that global reserve count will not be incremented
2651 * by free_huge_folio. This will make it appear
2652 * as though the reservation for this folio was
2653 * consumed. This may prevent the task from
2654 * faulting in the folio at a later time. This
2655 * is better than inconsistent global huge page
2656 * accounting of reserve counts.
2657 */
2658 folio_clear_hugetlb_restore_reserve(folio);
2659 else if (rc)
2660 (void)vma_add_reservation(h, vma, address);
2661 else
2662 vma_end_reservation(h, vma, address);
2663 } else {
2664 if (!rc) {
2665 /*
2666 * This indicates there is an entry in the reserve map
2667 * not added by alloc_hugetlb_folio. We know it was added
2668 * before the alloc_hugetlb_folio call, otherwise
2669 * hugetlb_restore_reserve would be set on the folio.
2670 * Remove the entry so that a subsequent allocation
2671 * does not consume a reservation.
2672 */
2673 rc = vma_del_reservation(h, vma, address);
2674 if (rc < 0)
2675 /*
2676 * VERY rare out of memory condition. Since
2677 * we can not delete the entry, set
2678 * hugetlb_restore_reserve so that the reserve
2679 * count will be incremented when the folio
2680 * is freed. This reserve will be consumed
2681 * on a subsequent allocation.
2682 */
2683 folio_set_hugetlb_restore_reserve(folio);
2684 } else if (rc < 0) {
2685 /*
2686 * Rare out of memory condition from
2687 * vma_needs_reservation call. Memory allocation is
2688 * only attempted if a new entry is needed. Therefore,
2689 * this implies there is not an entry in the
2690 * reserve map.
2691 *
2692 * For shared mappings, no entry in the map indicates
2693 * no reservation. We are done.
2694 */
2695 if (!(vma->vm_flags & VM_MAYSHARE))
2696 /*
2697 * For private mappings, no entry indicates
2698 * a reservation is present. Since we can
2699 * not add an entry, set hugetlb_restore_reserve
2700 * on the folio so reserve count will be
2701 * incremented when freed. This reserve will
2702 * be consumed on a subsequent allocation.
2703 */
2704 folio_set_hugetlb_restore_reserve(folio);
2705 } else
2706 /*
2707 * No reservation present, do nothing
2708 */
2709 vma_end_reservation(h, vma, address);
2710 }
2711 }
2712
2713 /*
2714 * alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve
2715 * the old one
2716 * @old_folio: Old folio to dissolve
2717 * @list: List to isolate the page in case we need to
2718 * Returns 0 on success, otherwise negated error.
2719 */
2720 static int alloc_and_dissolve_hugetlb_folio(struct folio *old_folio,
2721 struct list_head *list)
2722 {
2723 gfp_t gfp_mask;
2724 struct hstate *h;
2725 int nid = folio_nid(old_folio);
2726 struct folio *new_folio = NULL;
2727 int ret = 0;
2728
2729 retry:
2730 /*
2731 * The old_folio might have been dissolved from under our feet, so make sure
2732 * to carefully check the state under the lock.
2733 */
2734 spin_lock_irq(&hugetlb_lock);
2735 if (!folio_test_hugetlb(old_folio)) {
2736 /*
2737 * Freed from under us. Drop new_folio too.
2738 */
2739 goto free_new;
2740 } else if (folio_ref_count(old_folio)) {
2741 bool isolated;
2742
2743 /*
2744 * Someone has grabbed the folio, try to isolate it here.
2745 * Fail with -EBUSY if not possible.
2746 */
2747 spin_unlock_irq(&hugetlb_lock);
2748 isolated = folio_isolate_hugetlb(old_folio, list);
2749 ret = isolated ? 0 : -EBUSY;
2750 spin_lock_irq(&hugetlb_lock);
2751 goto free_new;
2752 } else if (!folio_test_hugetlb_freed(old_folio)) {
2753 /*
2754 * Folio's refcount is 0 but it has not been enqueued in the
2755 * freelist yet. Race window is small, so we can succeed here if
2756 * we retry.
2757 */
2758 spin_unlock_irq(&hugetlb_lock);
2759 cond_resched();
2760 goto retry;
2761 } else {
2762 h = folio_hstate(old_folio);
2763 if (!new_folio) {
2764 spin_unlock_irq(&hugetlb_lock);
2765 gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
2766 new_folio = alloc_fresh_hugetlb_folio(h, gfp_mask,
2767 nid, NULL);
2768 if (!new_folio)
2769 return -ENOMEM;
2770 goto retry;
2771 }
2772
2773 /*
2774 * Ok, old_folio is still a genuine free hugepage. Remove it from
2775 * the freelist and decrease the counters. These will be
2776 * incremented again when calling account_new_hugetlb_folio()
2777 * and enqueue_hugetlb_folio() for new_folio. The counters will
2778 * remain stable since this happens under the lock.
2779 */
2780 remove_hugetlb_folio(h, old_folio, false);
2781
2782 /*
2783 * Ref count on new_folio is already zero as it was dropped
2784 * earlier. It can be directly added to the pool free list.
2785 */
2786 account_new_hugetlb_folio(h, new_folio);
2787 enqueue_hugetlb_folio(h, new_folio);
2788
2789 /*
2790 * Folio has been replaced, we can safely free the old one.
2791 */
2792 spin_unlock_irq(&hugetlb_lock);
2793 update_and_free_hugetlb_folio(h, old_folio, false);
2794 }
2795
2796 return ret;
2797
2798 free_new:
2799 spin_unlock_irq(&hugetlb_lock);
2800 if (new_folio)
2801 update_and_free_hugetlb_folio(h, new_folio, false);
2802
2803 return ret;
2804 }
2805
2806 int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list)
2807 {
2808 int ret = -EBUSY;
2809
2810 /* Avoid disrupting the normal path by needlessly holding hugetlb_lock */
2811 if (!folio_test_hugetlb(folio))
2812 return 0;
2813
2814 /*
2815 * Fence off gigantic pages as there is a cyclic dependency between
2816 * alloc_contig_range and them. Return -ENOMEM as this has the effect
2817 * of bailing out right away without further retrying.
2818 */
2819 if (order_is_gigantic(folio_order(folio)))
2820 return -ENOMEM;
2821
2822 if (folio_ref_count(folio) && folio_isolate_hugetlb(folio, list))
2823 ret = 0;
2824 else if (!folio_ref_count(folio))
2825 ret = alloc_and_dissolve_hugetlb_folio(folio, list);
2826
2827 return ret;
2828 }
2829
2830 /*
2831 * replace_free_hugepage_folios - Replace free hugepage folios in a given pfn
2832 * range with new folios.
2833 * @start_pfn: start pfn of the given pfn range
2834 * @end_pfn: end pfn of the given pfn range
2835 * Returns 0 on success, otherwise negated error.
2836 */
2837 int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn)
2838 {
2839 struct folio *folio;
2840 int ret = 0;
2841
2842 LIST_HEAD(isolate_list);
2843
2844 while (start_pfn < end_pfn) {
2845 folio = pfn_folio(start_pfn);
2846
2847 /* Avoid disrupting the normal path by needlessly holding hugetlb_lock */
2848 if (folio_test_hugetlb(folio) && !folio_ref_count(folio)) {
2849 ret = alloc_and_dissolve_hugetlb_folio(folio, &isolate_list);
2850 if (ret)
2851 break;
2852
2853 putback_movable_pages(&isolate_list);
2854 }
2855 start_pfn++;
2856 }
2857
2858 return ret;
2859 }
2860
2861 void wait_for_freed_hugetlb_folios(void)
2862 {
2863 if (llist_empty(&hpage_freelist))
2864 return;
2865
2866 flush_work(&free_hpage_work);
2867 }
2868
2869 typedef enum {
2870 /*
2871 * For either 0/1: we checked the per-vma resv map, and one resv
2872 * count either can be reused (0), or an extra one is needed (1).
2873 */
2874 MAP_CHG_REUSE = 0,
2875 MAP_CHG_NEEDED = 1,
2876 /*
2877 * The per-vma resv count cannot be used, hence a new resv
2878 * count is enforced.
2879 *
2880 * NOTE: This is mostly identical to MAP_CHG_NEEDED, except
2881 * that vma_needs_reservation() currently has an unwanted side
2882 * effect in that either end() or commit() must be used to complete
2883 * the transaction. Hence it needs to be distinguished from NEEDED.
2884 */
2885 MAP_CHG_ENFORCED = 2,
2886 } map_chg_state;
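/*
 * For illustration, the checks in alloc_hugetlb_folio() below map to
 * these states as follows:
 *
 *	cow_from_owner                  -> MAP_CHG_ENFORCED
 *	vma_needs_reservation() == 0    -> MAP_CHG_REUSE
 *	vma_needs_reservation() > 0     -> MAP_CHG_NEEDED
 */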
2887
2888 /*
2889 * NOTE! "cow_from_owner" represents a very hacky usage only used in CoW
2890 * faults of hugetlb private mappings on top of a non-page-cache folio (in
2891 * which case even if there's a private vma resv map it won't cover such
2892 * allocation). New call sites should (probably) never set it to true!!
2893 * When it's set, the allocation will bypass all vma level reservations.
2894 */
2895 struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
2896 unsigned long addr, bool cow_from_owner)
2897 {
2898 struct hugepage_subpool *spool = subpool_vma(vma);
2899 struct hstate *h = hstate_vma(vma);
2900 struct folio *folio;
2901 long retval, gbl_chg, gbl_reserve;
2902 map_chg_state map_chg;
2903 int ret, idx;
2904 struct hugetlb_cgroup *h_cg = NULL;
2905 gfp_t gfp = htlb_alloc_mask(h) | __GFP_RETRY_MAYFAIL;
2906
2907 idx = hstate_index(h);
2908
2909 /* Do we need a separate per-vma reservation? */
2910 if (cow_from_owner) {
2911 /*
2912 * Special case! Since it's a CoW on top of a reserved
2913 * page, the private resv map doesn't count. So it cannot
2914 * consume the per-vma resv map even if it's reserved.
2915 */
2916 map_chg = MAP_CHG_ENFORCED;
2917 } else {
2918 /*
2919 * Examine the region/reserve map to determine if the process
2920 * has a reservation for the page to be allocated. A return
2921 * code of zero indicates a reservation exists (no change).
2922 */
2923 retval = vma_needs_reservation(h, vma, addr);
2924 if (retval < 0)
2925 return ERR_PTR(-ENOMEM);
2926 map_chg = retval ? MAP_CHG_NEEDED : MAP_CHG_REUSE;
2927 }
2928
2929 /*
2930 * Do we need a separate global reservation?
2931 *
2932 * Processes that did not create the mapping will have no
2933 * reserves as indicated by the region/reserve map. Check
2934 * that the allocation will not exceed the subpool limit.
2935 * Or if it can get one from the pool reservation directly.
2936 */
2937 if (map_chg) {
2938 gbl_chg = hugepage_subpool_get_pages(spool, 1);
2939 if (gbl_chg < 0)
2940 goto out_end_reservation;
2941 } else {
2942 /*
2943 * If we have the vma reservation ready, no need for extra
2944 * global reservation.
2945 */
2946 gbl_chg = 0;
2947 }
2948
2949 /*
2950 * If this allocation is not consuming a per-vma reservation,
2951 * charge the hugetlb cgroup now.
2952 */
2953 if (map_chg) {
2954 ret = hugetlb_cgroup_charge_cgroup_rsvd(
2955 idx, pages_per_huge_page(h), &h_cg);
2956 if (ret)
2957 goto out_subpool_put;
2958 }
2959
2960 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
2961 if (ret)
2962 goto out_uncharge_cgroup_reservation;
2963
2964 spin_lock_irq(&hugetlb_lock);
2965 /*
2966 * gbl_chg is passed to indicate whether or not a page must be taken
2967 * from the global free pool (global change). gbl_chg == 0 indicates
2968 * a reservation exists for the allocation.
2969 */
2970 folio = dequeue_hugetlb_folio_vma(h, vma, addr, gbl_chg);
2971 if (!folio) {
2972 spin_unlock_irq(&hugetlb_lock);
2973 folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
2974 if (!folio)
2975 goto out_uncharge_cgroup;
2976 spin_lock_irq(&hugetlb_lock);
2977 list_add(&folio->lru, &h->hugepage_activelist);
2978 folio_ref_unfreeze(folio, 1);
2979 /* Fall through */
2980 }
2981
2982 /*
2983 * Either dequeued or buddy-allocated folio needs to add special
2984 * mark to the folio when it consumes a global reservation.
2985 */
2986 if (!gbl_chg) {
2987 folio_set_hugetlb_restore_reserve(folio);
2988 h->resv_huge_pages--;
2989 }
2990
2991 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio);
2992 /* If allocation is not consuming a reservation, also store the
2993 * hugetlb_cgroup pointer on the page.
2994 */
2995 if (map_chg) {
2996 hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
2997 h_cg, folio);
2998 }
2999
3000 spin_unlock_irq(&hugetlb_lock);
3001
3002 hugetlb_set_folio_subpool(folio, spool);
3003
3004 if (map_chg != MAP_CHG_ENFORCED) {
3005 /* commit() is only needed if the map_chg is not enforced */
3006 retval = vma_commit_reservation(h, vma, addr);
3007 /*
3008 * Check for a possible race condition: the page may have been
3009 * added to the reservation map between
3010 * vma_needs_reservation and vma_commit_reservation,
3011 * which indicates a race with hugetlb_reserve_pages.
3012 * Adjust for the subpool count incremented above AND
3013 * in hugetlb_reserve_pages for the same page. Also,
3014 * the reservation count added in hugetlb_reserve_pages
3015 * no longer applies.
3016 */
3017 if (unlikely(map_chg == MAP_CHG_NEEDED && retval == 0)) {
3018 long rsv_adjust;
3019
3020 rsv_adjust = hugepage_subpool_put_pages(spool, 1);
3021 hugetlb_acct_memory(h, -rsv_adjust);
3022 if (map_chg) {
3023 spin_lock_irq(&hugetlb_lock);
3024 hugetlb_cgroup_uncharge_folio_rsvd(
3025 hstate_index(h), pages_per_huge_page(h),
3026 folio);
3027 spin_unlock_irq(&hugetlb_lock);
3028 }
3029 }
3030 }
3031
3032 ret = mem_cgroup_charge_hugetlb(folio, gfp);
3033 /*
3034 * Unconditionally increment NR_HUGETLB here. If it turns out that
3035 * mem_cgroup_charge_hugetlb failed, then immediately free the page and
3036 * decrement NR_HUGETLB.
3037 */
3038 lruvec_stat_mod_folio(folio, NR_HUGETLB, pages_per_huge_page(h));
3039
3040 if (ret == -ENOMEM) {
3041 free_huge_folio(folio);
3042 return ERR_PTR(-ENOMEM);
3043 }
3044
3045 return folio;
3046
3047 out_uncharge_cgroup:
3048 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
3049 out_uncharge_cgroup_reservation:
3050 if (map_chg)
3051 hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h),
3052 h_cg);
3053 out_subpool_put:
3054 /*
3055 * put page to subpool iff the quota of subpool's rsv_hpages is used
3056 * during hugepage_subpool_get_pages.
3057 */
3058 if (map_chg && !gbl_chg) {
3059 gbl_reserve = hugepage_subpool_put_pages(spool, 1);
3060 hugetlb_acct_memory(h, -gbl_reserve);
3061 }
3062
3064 out_end_reservation:
3065 if (map_chg != MAP_CHG_ENFORCED)
3066 vma_end_reservation(h, vma, addr);
3067 return ERR_PTR(-ENOSPC);
3068 }
3069
3070 static __init void *alloc_bootmem(struct hstate *h, int nid, bool node_exact)
3071 {
3072 struct huge_bootmem_page *m;
3073 int listnode = nid;
3074
3075 if (hugetlb_early_cma(h))
3076 m = hugetlb_cma_alloc_bootmem(h, &listnode, node_exact);
3077 else {
3078 if (node_exact)
3079 m = memblock_alloc_exact_nid_raw(huge_page_size(h),
3080 huge_page_size(h), 0,
3081 MEMBLOCK_ALLOC_ACCESSIBLE, nid);
3082 else {
3083 m = memblock_alloc_try_nid_raw(huge_page_size(h),
3084 huge_page_size(h), 0,
3085 MEMBLOCK_ALLOC_ACCESSIBLE, nid);
3086 /*
3087 * For pre-HVO to work correctly, pages need to be on
3088 * the list for the node they were actually allocated
3089 * from. That node may be different in the case of
3090 * fallback by memblock_alloc_try_nid_raw. So,
3091 * extract the actual node first.
3092 */
3093 if (m)
3094 listnode = early_pfn_to_nid(PHYS_PFN(virt_to_phys(m)));
3095 }
3096
3097 if (m) {
3098 m->flags = 0;
3099 m->cma = NULL;
3100 }
3101 }
3102
3103 if (m) {
3104 /*
3105 * Use the beginning of the huge page to store the
3106 * huge_bootmem_page struct (until gather_bootmem
3107 * puts them into the mem_map).
3108 *
3109 * Put them into a private list first because mem_map
3110 * is not up yet.
3111 */
3112 INIT_LIST_HEAD(&m->list);
3113 list_add(&m->list, &huge_boot_pages[listnode]);
3114 m->hstate = h;
3115 }
3116
3117 return m;
3118 }
3119
3120 int alloc_bootmem_huge_page(struct hstate *h, int nid)
3121 __attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
3122 int __alloc_bootmem_huge_page(struct hstate *h, int nid)
3123 {
3124 struct huge_bootmem_page *m = NULL; /* initialize for clang */
3125 int nr_nodes, node = nid;
3126
3127 /* do node specific alloc */
3128 if (nid != NUMA_NO_NODE) {
3129 m = alloc_bootmem(h, node, true);
3130 if (!m)
3131 return 0;
3132 goto found;
3133 }
3134
3135 /* allocate from next node when distributing huge pages */
3136 for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node,
3137 &hugetlb_bootmem_nodes) {
3138 m = alloc_bootmem(h, node, false);
3139 if (!m)
3140 return 0;
3141 goto found;
3142 }
3143
3144 found:
3145
3146 /*
3147 * Only initialize the head struct page in memmap_init_reserved_pages;
3148 * the rest of the struct pages will be initialized by the HugeTLB
3149 * subsystem itself.
3150 * The head struct page is used by the HugeTLB subsystem to get folio
3151 * information such as the zone id and node id.
3152 */
3153 memblock_reserved_mark_noinit(virt_to_phys((void *)m + PAGE_SIZE),
3154 huge_page_size(h) - PAGE_SIZE);
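/*
 * Example of the sizes involved (illustrative, assuming 4 KB base
 * pages and a 1 GB hstate): only the head struct page is initialized
 * early; the remaining 262143 tail pages are marked noinit here, and
 * their struct pages are initialized later by
 * hugetlb_folio_init_tail_vmemmap().
 */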
3155
3156 return 1;
3157 }
3158
3159 /* Initialize the tail struct pages of a hugepage in [start_page_number, end_page_number) */
3160 static void __init hugetlb_folio_init_tail_vmemmap(struct folio *folio,
3161 unsigned long start_page_number,
3162 unsigned long end_page_number)
3163 {
3164 enum zone_type zone = folio_zonenum(folio);
3165 int nid = folio_nid(folio);
3166 struct page *page = folio_page(folio, start_page_number);
3167 unsigned long head_pfn = folio_pfn(folio);
3168 unsigned long pfn, end_pfn = head_pfn + end_page_number;
3169
3170 /*
3171 * As we marked all tail pages with memblock_reserved_mark_noinit(),
3172 * we must initialize them ourselves here.
3173 */
3174 for (pfn = head_pfn + start_page_number; pfn < end_pfn; page++, pfn++) {
3175 __init_single_page(page, pfn, zone, nid);
3176 prep_compound_tail((struct page *)folio, pfn - head_pfn);
3177 set_page_count(page, 0);
3178 }
3179 }
3180
3181 static void __init hugetlb_folio_init_vmemmap(struct folio *folio,
3182 struct hstate *h,
3183 unsigned long nr_pages)
3184 {
3185 int ret;
3186
3187 /*
3188 * This is an open-coded prep_compound_page() whereby we avoid
3189 * walking pages twice by initializing/preparing+freezing them in the
3190 * same go.
3191 */
3192 __folio_clear_reserved(folio);
3193 __folio_set_head(folio);
3194 ret = folio_ref_freeze(folio, 1);
3195 VM_BUG_ON(!ret);
3196 hugetlb_folio_init_tail_vmemmap(folio, 1, nr_pages);
3197 prep_compound_head(&folio->page, huge_page_order(h));
3198 }
3199
3200 static bool __init hugetlb_bootmem_page_prehvo(struct huge_bootmem_page *m)
3201 {
3202 return m->flags & HUGE_BOOTMEM_HVO;
3203 }
3204
3205 static bool __init hugetlb_bootmem_page_earlycma(struct huge_bootmem_page *m)
3206 {
3207 return m->flags & HUGE_BOOTMEM_CMA;
3208 }
3209
3210 /*
3211 * memblock-allocated pageblocks might not have the migrate type set
3212 * if marked with the 'noinit' flag. Set it to the default (MIGRATE_MOVABLE)
3213 * here, or MIGRATE_CMA if this was a page allocated through an early CMA
3214 * reservation.
3215 *
3216 * In case of vmemmap optimized folios, the tail vmemmap pages are mapped
3217 * read-only, but that's ok - for sparse vmemmap this does not write to
3218 * the page structure.
3219 */
3220 static void __init hugetlb_bootmem_init_migratetype(struct folio *folio,
3221 struct hstate *h)
3222 {
3223 unsigned long nr_pages = pages_per_huge_page(h), i;
3224
3225 WARN_ON_ONCE(!pageblock_aligned(folio_pfn(folio)));
3226
3227 for (i = 0; i < nr_pages; i += pageblock_nr_pages) {
3228 if (folio_test_hugetlb_cma(folio))
3229 init_cma_pageblock(folio_page(folio, i));
3230 else
3231 init_pageblock_migratetype(folio_page(folio, i),
3232 MIGRATE_MOVABLE, false);
3233 }
3234 }
3235
3236 static void __init prep_and_add_bootmem_folios(struct hstate *h,
3237 struct list_head *folio_list)
3238 {
3239 unsigned long flags;
3240 struct folio *folio, *tmp_f;
3241
3242 /* Send list for bulk vmemmap optimization processing */
3243 hugetlb_vmemmap_optimize_bootmem_folios(h, folio_list);
3244
3245 list_for_each_entry_safe(folio, tmp_f, folio_list, lru) {
3246 if (!folio_test_hugetlb_vmemmap_optimized(folio)) {
3247 /*
3248 * If HVO fails, initialize all tail struct pages.
3249 * We do not worry about potential long lock hold
3250 * time as this is early in boot and there should
3251 * be no contention.
3252 */
3253 hugetlb_folio_init_tail_vmemmap(folio,
3254 HUGETLB_VMEMMAP_RESERVE_PAGES,
3255 pages_per_huge_page(h));
3256 }
3257 hugetlb_bootmem_init_migratetype(folio, h);
3258 /* Subdivide locks to achieve better parallel performance */
3259 spin_lock_irqsave(&hugetlb_lock, flags);
3260 account_new_hugetlb_folio(h, folio);
3261 enqueue_hugetlb_folio(h, folio);
3262 spin_unlock_irqrestore(&hugetlb_lock, flags);
3263 }
3264 }
3265
3266 bool __init hugetlb_bootmem_page_zones_valid(int nid,
3267 struct huge_bootmem_page *m)
3268 {
3269 unsigned long start_pfn;
3270 bool valid;
3271
3272 if (m->flags & HUGE_BOOTMEM_ZONES_VALID) {
3273 /*
3274 * Already validated, skip check.
3275 */
3276 return true;
3277 }
3278
3279 if (hugetlb_bootmem_page_earlycma(m)) {
3280 valid = cma_validate_zones(m->cma);
3281 goto out;
3282 }
3283
3284 start_pfn = virt_to_phys(m) >> PAGE_SHIFT;
3285
3286 valid = !pfn_range_intersects_zones(nid, start_pfn,
3287 pages_per_huge_page(m->hstate));
3288 out:
3289 if (!valid)
3290 hstate_boot_nrinvalid[hstate_index(m->hstate)]++;
3291
3292 return valid;
3293 }
3294
3295 /*
3296 * Free a bootmem page that was found to be invalid (intersecting with
3297 * multiple zones).
3298 *
3299 * Since it intersects with multiple zones, we can't just do a free
3300 * operation on all pages at once, but instead have to walk all
3301 * pages, freeing them one by one.
3302 */
3303 static void __init hugetlb_bootmem_free_invalid_page(int nid, struct page *page,
3304 struct hstate *h)
3305 {
3306 unsigned long npages = pages_per_huge_page(h);
3307 unsigned long pfn;
3308
3309 while (npages--) {
3310 pfn = page_to_pfn(page);
3311 __init_page_from_nid(pfn, nid);
3312 free_reserved_page(page);
3313 page++;
3314 }
3315 }
3316
3317 /*
3318 * Put bootmem huge pages into the standard lists after mem_map is up.
3319 * Note: This only applies to gigantic (order > MAX_PAGE_ORDER) pages.
3320 */
3321 static void __init gather_bootmem_prealloc_node(unsigned long nid)
3322 {
3323 LIST_HEAD(folio_list);
3324 struct huge_bootmem_page *m, *tm;
3325 struct hstate *h = NULL, *prev_h = NULL;
3326
3327 list_for_each_entry_safe(m, tm, &huge_boot_pages[nid], list) {
3328 struct page *page = virt_to_page(m);
3329 struct folio *folio = (void *)page;
3330
3331 h = m->hstate;
3332 if (!hugetlb_bootmem_page_zones_valid(nid, m)) {
3333 /*
3334 * Can't use this page. Initialize the
3335 * page structures if that hasn't already
3336 * been done, and give them to the page
3337 * allocator.
3338 */
3339 hugetlb_bootmem_free_invalid_page(nid, page, h);
3340 continue;
3341 }
3342
3343 /*
3344 * It is possible to have multiple huge page sizes (hstates)
3345 * in this list. If so, process each size separately.
3346 */
3347 if (h != prev_h && prev_h != NULL)
3348 prep_and_add_bootmem_folios(prev_h, &folio_list);
3349 prev_h = h;
3350
3351 VM_BUG_ON(!hstate_is_gigantic(h));
3352 WARN_ON(folio_ref_count(folio) != 1);
3353
3354 hugetlb_folio_init_vmemmap(folio, h,
3355 HUGETLB_VMEMMAP_RESERVE_PAGES);
3356 init_new_hugetlb_folio(folio);
3357
3358 if (hugetlb_bootmem_page_prehvo(m))
3359 /*
3360 * If pre-HVO was done, just set the
3361 * flag, the HVO code will then skip
3362 * this folio.
3363 */
3364 folio_set_hugetlb_vmemmap_optimized(folio);
3365
3366 if (hugetlb_bootmem_page_earlycma(m))
3367 folio_set_hugetlb_cma(folio);
3368
3369 list_add(&folio->lru, &folio_list);
3370
3371 /*
3372 * We need to restore the 'stolen' pages to totalram_pages
3373 * in order to fix confusing memory reports from free(1) and
3374 * other side-effects, like CommitLimit going negative.
3375 *
3376 * For CMA pages, this is done in init_cma_pageblock
3377 * (via hugetlb_bootmem_init_migratetype), so skip it here.
3378 */
3379 if (!folio_test_hugetlb_cma(folio))
3380 adjust_managed_page_count(page, pages_per_huge_page(h));
3381 cond_resched();
3382 }
3383
3384 prep_and_add_bootmem_folios(h, &folio_list);
3385 }
3386
3387 static void __init gather_bootmem_prealloc_parallel(unsigned long start,
3388 unsigned long end, void *arg)
3389 {
3390 int nid;
3391
3392 for (nid = start; nid < end; nid++)
3393 gather_bootmem_prealloc_node(nid);
3394 }
3395
3396 static void __init gather_bootmem_prealloc(void)
3397 {
3398 struct padata_mt_job job = {
3399 .thread_fn = gather_bootmem_prealloc_parallel,
3400 .fn_arg = NULL,
3401 .start = 0,
3402 .size = nr_node_ids,
3403 .align = 1,
3404 .min_chunk = 1,
3405 .max_threads = num_node_state(N_MEMORY),
3406 .numa_aware = true,
3407 };
3408
3409 padata_do_multithreaded(&job);
3410 }
3411
3412 static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
3413 {
3414 unsigned long i;
3415 char buf[32];
3416 LIST_HEAD(folio_list);
3417
3418 for (i = 0; i < h->max_huge_pages_node[nid]; ++i) {
3419 if (hstate_is_gigantic(h)) {
3420 if (!alloc_bootmem_huge_page(h, nid))
3421 break;
3422 } else {
3423 struct folio *folio;
3424 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
3425
3426 folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid,
3427 &node_states[N_MEMORY], NULL);
3428 if (!folio)
3429 break;
3430 list_add(&folio->lru, &folio_list);
3431 }
3432 cond_resched();
3433 }
3434
3435 if (!list_empty(&folio_list))
3436 prep_and_add_allocated_folios(h, &folio_list);
3437
3438 if (i == h->max_huge_pages_node[nid])
3439 return;
3440
3441 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3442 pr_warn("HugeTLB: allocating %u of page size %s failed node%d. Only allocated %lu hugepages.\n",
3443 h->max_huge_pages_node[nid], buf, nid, i);
3444 h->max_huge_pages -= (h->max_huge_pages_node[nid] - i);
3445 h->max_huge_pages_node[nid] = i;
3446 }
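/*
 * Example of the shortfall accounting above (illustrative numbers): if
 * hugepages=0:8 was requested but only 5 pages could be allocated on node 0,
 * the warning reports 8 requested vs. 5 allocated, max_huge_pages is reduced
 * by 3, and max_huge_pages_node[0] is set to 5.
 */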
3447
3448 static bool __init hugetlb_hstate_alloc_pages_specific_nodes(struct hstate *h)
3449 {
3450 int i;
3451 bool node_specific_alloc = false;
3452
3453 for_each_online_node(i) {
3454 if (h->max_huge_pages_node[i] > 0) {
3455 hugetlb_hstate_alloc_pages_onenode(h, i);
3456 node_specific_alloc = true;
3457 }
3458 }
3459
3460 return node_specific_alloc;
3461 }
3462
3463 static void __init hugetlb_hstate_alloc_pages_errcheck(unsigned long allocated, struct hstate *h)
3464 {
3465 if (allocated < h->max_huge_pages) {
3466 char buf[32];
3467
3468 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3469 pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n",
3470 h->max_huge_pages, buf, allocated);
3471 h->max_huge_pages = allocated;
3472 }
3473 }
3474
3475 static void __init hugetlb_pages_alloc_boot_node(unsigned long start, unsigned long end, void *arg)
3476 {
3477 struct hstate *h = (struct hstate *)arg;
3478 int i, num = end - start;
3479 nodemask_t node_alloc_noretry;
3480 LIST_HEAD(folio_list);
3481 int next_node = first_online_node;
3482
3483 /* Bit mask controlling how hard we retry per-node allocations. */
3484 nodes_clear(node_alloc_noretry);
3485
3486 for (i = 0; i < num; ++i) {
3487 struct folio *folio;
3488
3489 if (hugetlb_vmemmap_optimizable_size(h) &&
3490 (si_mem_available() == 0) && !list_empty(&folio_list)) {
3491 prep_and_add_allocated_folios(h, &folio_list);
3492 INIT_LIST_HEAD(&folio_list);
3493 }
3494 folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
3495 &node_alloc_noretry, &next_node);
3496 if (!folio)
3497 break;
3498
3499 list_move(&folio->lru, &folio_list);
3500 cond_resched();
3501 }
3502
3503 prep_and_add_allocated_folios(h, &folio_list);
3504 }
3505
3506 static unsigned long __init hugetlb_gigantic_pages_alloc_boot(struct hstate *h)
3507 {
3508 unsigned long i;
3509
3510 for (i = 0; i < h->max_huge_pages; ++i) {
3511 if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
3512 break;
3513 cond_resched();
3514 }
3515
3516 return i;
3517 }
3518
3519 static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h)
3520 {
3521 struct padata_mt_job job = {
3522 .fn_arg = h,
3523 .align = 1,
3524 .numa_aware = true
3525 };
3526
3527 unsigned long jiffies_start;
3528 unsigned long jiffies_end;
3529 unsigned long remaining;
3530
3531 job.thread_fn = hugetlb_pages_alloc_boot_node;
3532
3533 /*
3534 * job.max_threads is 25% of the available cpu threads by default.
3535 *
3536 * On large servers with terabytes of memory, huge page allocation
3537 * can consume a considerable amount of time.
3538 *
3539 * Tests below show how long it takes to allocate 1 TiB of memory with
3540 * 2 MiB huge pages. Using more threads can significantly improve allocation time.
3541 *
3542 * +-----------------------+-------+-------+-------+-------+-------+
3543 * | threads | 8 | 16 | 32 | 64 | 128 |
3544 * +-----------------------+-------+-------+-------+-------+-------+
3545 * | skylake 144 cpus | 44s | 22s | 16s | 19s | 20s |
3546 * | cascade lake 192 cpus | 39s | 20s | 11s | 10s | 9s |
3547 * +-----------------------+-------+-------+-------+-------+-------+
3548 */
3549 if (hugepage_allocation_threads == 0) {
3550 hugepage_allocation_threads = num_online_cpus() / 4;
3551 hugepage_allocation_threads = max(hugepage_allocation_threads, 1);
3552 }
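/*
 * For example, with no hugepage_alloc_threads= override, a machine with
 * 144 online CPUs gets 144 / 4 = 36 allocation threads, while a 2-CPU
 * machine is clamped to max(0, 1) = 1 thread.
 */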
3553
3554 job.max_threads = hugepage_allocation_threads;
3555
3556 jiffies_start = jiffies;
3557 do {
3558 remaining = h->max_huge_pages - h->nr_huge_pages;
3559
3560 job.start = h->nr_huge_pages;
3561 job.size = remaining;
3562 job.min_chunk = remaining / hugepage_allocation_threads;
3563 padata_do_multithreaded(&job);
3564
3565 if (h->nr_huge_pages == h->max_huge_pages)
3566 break;
3567
3568 /*
3569 * Retry only if the vmemmap optimization might have been able to free
3570 * some memory back to the system.
3571 */
3572 if (!hugetlb_vmemmap_optimizable(h))
3573 break;
3574
3575 /* Continue if progress was made in last iteration */
3576 } while (remaining != (h->max_huge_pages - h->nr_huge_pages));
3577
3578 jiffies_end = jiffies;
3579
3580 pr_info("HugeTLB: allocation took %dms with hugepage_allocation_threads=%ld\n",
3581 jiffies_to_msecs(jiffies_end - jiffies_start),
3582 hugepage_allocation_threads);
3583
3584 return h->nr_huge_pages;
3585 }
3586
3587 /*
3588 * NOTE: this routine is called in different contexts for gigantic and
3589 * non-gigantic pages.
3590 * - For gigantic pages, this is called early in the boot process and
3591 * pages are allocated from the memblock allocator or something similar.
3592 * Gigantic pages are actually added to pools later with the routine
3593 * gather_bootmem_prealloc.
3594 * - For non-gigantic pages, this is called later in the boot process after
3595 * all of mm is up and functional. Pages are allocated from buddy and
3596 * then added to hugetlb pools.
3597 */
3598 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
3599 {
3600 unsigned long allocated;
3601
3602 /*
3603 * Skip gigantic hugepages allocation if early CMA
3604 * reservations are not available.
3605 */
3606 if (hstate_is_gigantic(h) && hugetlb_cma_total_size() &&
3607 !hugetlb_early_cma(h)) {
3608 pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
3609 return;
3610 }
3611
3612 if (!h->max_huge_pages)
3613 return;
3614
3615 /* do node specific alloc */
3616 if (hugetlb_hstate_alloc_pages_specific_nodes(h))
3617 return;
3618
3619 /* below will do all node balanced alloc */
3620 if (hstate_is_gigantic(h))
3621 allocated = hugetlb_gigantic_pages_alloc_boot(h);
3622 else
3623 allocated = hugetlb_pages_alloc_boot(h);
3624
3625 hugetlb_hstate_alloc_pages_errcheck(allocated, h);
3626 }
3627
3628 static void __init hugetlb_init_hstates(void)
3629 {
3630 struct hstate *h, *h2;
3631
3632 for_each_hstate(h) {
3633 /*
3634 * Always reset to first_memory_node here, even if
3635 * next_nid_to_alloc was set before - we can't
3636 * reference hugetlb_bootmem_nodes after init, and
3637 * first_memory_node is right for all further allocations.
3638 */
3639 h->next_nid_to_alloc = first_memory_node;
3640 h->next_nid_to_free = first_memory_node;
3641
3642 /* oversize hugepages were init'ed in early boot */
3643 if (!hstate_is_gigantic(h))
3644 hugetlb_hstate_alloc_pages(h);
3645
3646 /*
3647 * Set demote order for each hstate. Note that
3648 * h->demote_order is initially 0.
3649 * - We can not demote gigantic pages if runtime freeing
3650 * is not supported, so skip this.
3651 * - If CMA allocation is possible, we can not demote
3652 * HUGETLB_PAGE_ORDER or smaller size pages.
3653 */
3654 if (hstate_is_gigantic_no_runtime(h))
3655 continue;
3656 if (hugetlb_cma_total_size() && h->order <= HUGETLB_PAGE_ORDER)
3657 continue;
3658 for_each_hstate(h2) {
3659 if (h2 == h)
3660 continue;
3661 if (h2->order < h->order &&
3662 h2->order > h->demote_order)
3663 h->demote_order = h2->order;
3664 }
3665 }
3666 }
3667
3668 static void __init report_hugepages(void)
3669 {
3670 struct hstate *h;
3671 unsigned long nrinvalid;
3672
3673 for_each_hstate(h) {
3674 char buf[32];
3675
3676 nrinvalid = hstate_boot_nrinvalid[hstate_index(h)];
3677 h->max_huge_pages -= nrinvalid;
3678
3679 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3680 pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n",
3681 buf, h->nr_huge_pages);
3682 if (nrinvalid)
3683 pr_info("HugeTLB: %s page size: %lu invalid page%s discarded\n",
3684 buf, nrinvalid, str_plural(nrinvalid));
3685 pr_info("HugeTLB: %d KiB vmemmap can be freed for a %s page\n",
3686 hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf);
3687 }
3688 }
3689
3690 #ifdef CONFIG_HIGHMEM
3691 static void try_to_free_low(struct hstate *h, unsigned long count,
3692 nodemask_t *nodes_allowed)
3693 {
3694 int i;
3695 LIST_HEAD(page_list);
3696
3697 lockdep_assert_held(&hugetlb_lock);
3698 if (hstate_is_gigantic(h))
3699 return;
3700
3701 /*
3702 * Collect pages to be freed on a list, and free after dropping lock
3703 */
3704 for_each_node_mask(i, *nodes_allowed) {
3705 struct folio *folio, *next;
3706 struct list_head *freel = &h->hugepage_freelists[i];
3707 list_for_each_entry_safe(folio, next, freel, lru) {
3708 if (count >= h->nr_huge_pages)
3709 goto out;
3710 if (folio_test_highmem(folio))
3711 continue;
3712 remove_hugetlb_folio(h, folio, false);
3713 list_add(&folio->lru, &page_list);
3714 }
3715 }
3716
3717 out:
3718 spin_unlock_irq(&hugetlb_lock);
3719 update_and_free_pages_bulk(h, &page_list);
3720 spin_lock_irq(&hugetlb_lock);
3721 }
3722 #else
3723 static inline void try_to_free_low(struct hstate *h, unsigned long count,
3724 nodemask_t *nodes_allowed)
3725 {
3726 }
3727 #endif
3728
3729 /*
3730 * Increment or decrement surplus_huge_pages. Keep node-specific counters
3731 * balanced by operating on them in a round-robin fashion.
3732 * Returns 1 if an adjustment was made.
3733 */
3734 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
3735 int delta)
3736 {
3737 int nr_nodes, node;
3738
3739 lockdep_assert_held(&hugetlb_lock);
3740 VM_BUG_ON(delta != -1 && delta != 1);
3741
3742 if (delta < 0) {
3743 for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, nodes_allowed) {
3744 if (h->surplus_huge_pages_node[node])
3745 goto found;
3746 }
3747 } else {
3748 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
3749 if (h->surplus_huge_pages_node[node] <
3750 h->nr_huge_pages_node[node])
3751 goto found;
3752 }
3753 }
3754 return 0;
3755
3756 found:
3757 h->surplus_huge_pages += delta;
3758 h->surplus_huge_pages_node[node] += delta;
3759 return 1;
3760 }
3761
3762 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
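/*
 * Example: with nr_huge_pages = 10 and surplus_huge_pages = 2, the pool
 * holds persistent_huge_pages(h) = 8 pages that remain once the surplus
 * pages are returned to the buddy allocator.
 */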
3763 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
3764 nodemask_t *nodes_allowed)
3765 {
3766 unsigned long persistent_free_count;
3767 unsigned long min_count;
3768 unsigned long allocated;
3769 struct folio *folio;
3770 LIST_HEAD(page_list);
3771 NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
3772
3773 /*
3774 * Bit mask controlling how hard we retry per-node allocations.
3775 * If we can not allocate the bit mask, do not attempt to allocate
3776 * the requested huge pages.
3777 */
3778 if (node_alloc_noretry)
3779 nodes_clear(*node_alloc_noretry);
3780 else
3781 return -ENOMEM;
3782
3783 /*
3784 * resize_lock mutex prevents concurrent adjustments to number of
3785 * pages in hstate via the proc/sysfs interfaces.
3786 */
3787 mutex_lock(&h->resize_lock);
3788 flush_free_hpage_work(h);
3789 spin_lock_irq(&hugetlb_lock);
3790
3791 /*
3792 * Check for a node specific request.
3793 * Changing node specific huge page count may require a corresponding
3794 * change to the global count. In any case, the passed node mask
3795 * (nodes_allowed) will restrict alloc/free to the specified node.
3796 */
3797 if (nid != NUMA_NO_NODE) {
3798 unsigned long old_count = count;
3799
3800 count += persistent_huge_pages(h) -
3801 (h->nr_huge_pages_node[nid] -
3802 h->surplus_huge_pages_node[nid]);
3803 /*
3804 * User may have specified a large count value which caused the
3805 * above calculation to overflow. In this case, they wanted
3806 * to allocate as many huge pages as possible. Set count to
3807 * largest possible value to align with their intention.
3808 */
3809 if (count < old_count)
3810 count = ULONG_MAX;
3811 }
3812
3813 /*
3814 * Runtime allocation of gigantic pages depends on the capability for large
3815 * page range allocation.
3816 * If the system does not provide this feature, return an error when
3817 * the user tries to allocate gigantic pages but let the user free the
3818 * boottime allocated gigantic pages.
3819 */
3820 if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
3821 if (count > persistent_huge_pages(h)) {
3822 spin_unlock_irq(&hugetlb_lock);
3823 mutex_unlock(&h->resize_lock);
3824 NODEMASK_FREE(node_alloc_noretry);
3825 return -EINVAL;
3826 }
3827 /* Fall through to decrease pool */
3828 }
3829
3830 /*
3831 * Increase the pool size
3832 * First take pages out of surplus state. Then make up the
3833 * remaining difference by allocating fresh huge pages.
3834 *
3835 * We might race with alloc_surplus_hugetlb_folio() here and be unable
3836 * to convert a surplus huge page to a normal huge page. That is
3837 * not critical, though, it just means the overall size of the
3838 * pool might be one hugepage larger than it needs to be, but
3839 * within all the constraints specified by the sysctls.
3840 */
3841 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
3842 if (!adjust_pool_surplus(h, nodes_allowed, -1))
3843 break;
3844 }
3845
3846 allocated = 0;
3847 while (count > (persistent_huge_pages(h) + allocated)) {
3848 /*
3849 * If this allocation races such that we no longer need the
3850 * page, free_huge_folio will handle it by freeing the page
3851 * and reducing the surplus.
3852 */
3853 spin_unlock_irq(&hugetlb_lock);
3854
3855 /* yield cpu to avoid soft lockup */
3856 cond_resched();
3857
3858 folio = alloc_pool_huge_folio(h, nodes_allowed,
3859 node_alloc_noretry,
3860 &h->next_nid_to_alloc);
3861 if (!folio) {
3862 prep_and_add_allocated_folios(h, &page_list);
3863 spin_lock_irq(&hugetlb_lock);
3864 goto out;
3865 }
3866
3867 list_add(&folio->lru, &page_list);
3868 allocated++;
3869
3870 /* Bail for signals. Probably ctrl-c from user */
3871 if (signal_pending(current)) {
3872 prep_and_add_allocated_folios(h, &page_list);
3873 spin_lock_irq(&hugetlb_lock);
3874 goto out;
3875 }
3876
3877 spin_lock_irq(&hugetlb_lock);
3878 }
3879
3880 /* Add allocated pages to the pool */
3881 if (!list_empty(&page_list)) {
3882 spin_unlock_irq(&hugetlb_lock);
3883 prep_and_add_allocated_folios(h, &page_list);
3884 spin_lock_irq(&hugetlb_lock);
3885 }
3886
3887 /*
3888 * Decrease the pool size
3889 * First return free pages to the buddy allocator (being careful
3890 * to keep enough around to satisfy reservations). Then place
3891 * pages into surplus state as needed so the pool will shrink
3892 * to the desired size as pages become free.
3893 *
3894 * By placing pages into the surplus state independent of the
3895 * overcommit value, we are allowing the surplus pool size to
3896 * exceed overcommit. There are few sane options here. Since
3897 * alloc_surplus_hugetlb_folio() is checking the global counter,
3898 * though, we'll note that we're not allowed to exceed surplus
3899 * and won't grow the pool anywhere else. Not until one of the
3900 * sysctls are changed, or the surplus pages go out of use.
3901 *
3902 * min_count is the expected number of persistent pages, we
3903 * shouldn't calculate min_count by using
3904 * resv_huge_pages + persistent_huge_pages() - free_huge_pages,
3905 * because there may exist free surplus huge pages, and this will
3906 * lead to subtracting twice. Free surplus huge pages come from HVO
3907 * failing to restore vmemmap, see comments in the callers of
3908 * hugetlb_vmemmap_restore_folio(). Thus, we should calculate
3909 * persistent free count first.
3910 */
3911 persistent_free_count = h->free_huge_pages;
3912 if (h->free_huge_pages > persistent_huge_pages(h)) {
3913 if (h->free_huge_pages > h->surplus_huge_pages)
3914 persistent_free_count -= h->surplus_huge_pages;
3915 else
3916 persistent_free_count = 0;
3917 }
3918 min_count = h->resv_huge_pages + persistent_huge_pages(h) - persistent_free_count;
3919 min_count = max(count, min_count);
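/*
 * Worked example with illustrative numbers: resv_huge_pages = 2,
 * persistent_huge_pages() = 10, free_huge_pages = 4 and no surplus pages
 * gives persistent_free_count = 4 and min_count = 2 + 10 - 4 = 8, so at
 * least max(count, 8) pages are kept to cover outstanding reservations.
 */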
3920 try_to_free_low(h, min_count, nodes_allowed);
3921
3922 /*
3923 * Collect pages to be removed on list without dropping lock
3924 */
3925 while (min_count < persistent_huge_pages(h)) {
3926 folio = remove_pool_hugetlb_folio(h, nodes_allowed, 0);
3927 if (!folio)
3928 break;
3929
3930 list_add(&folio->lru, &page_list);
3931 }
3932 /* free the pages after dropping lock */
3933 spin_unlock_irq(&hugetlb_lock);
3934 update_and_free_pages_bulk(h, &page_list);
3935 flush_free_hpage_work(h);
3936 spin_lock_irq(&hugetlb_lock);
3937
3938 while (count < persistent_huge_pages(h)) {
3939 if (!adjust_pool_surplus(h, nodes_allowed, 1))
3940 break;
3941 }
3942 out:
3943 h->max_huge_pages = persistent_huge_pages(h);
3944 spin_unlock_irq(&hugetlb_lock);
3945 mutex_unlock(&h->resize_lock);
3946
3947 NODEMASK_FREE(node_alloc_noretry);
3948
3949 return 0;
3950 }
3951
3952 static long demote_free_hugetlb_folios(struct hstate *src, struct hstate *dst,
3953 struct list_head *src_list)
3954 {
3955 long rc;
3956 struct folio *folio, *next;
3957 LIST_HEAD(dst_list);
3958 LIST_HEAD(ret_list);
3959
3960 rc = hugetlb_vmemmap_restore_folios(src, src_list, &ret_list);
3961 list_splice_init(&ret_list, src_list);
3962
3963 /*
3964 * Taking target hstate mutex synchronizes with set_max_huge_pages.
3965 * Without the mutex, pages added to target hstate could be marked
3966 * as surplus.
3967 *
3968 * Note that we already hold src->resize_lock. To prevent deadlock,
3969 * use the convention of always taking larger size hstate mutex first.
3970 */
3971 mutex_lock(&dst->resize_lock);
3972
3973 list_for_each_entry_safe(folio, next, src_list, lru) {
3974 int i;
3975 bool cma;
3976
3977 if (folio_test_hugetlb_vmemmap_optimized(folio))
3978 continue;
3979
3980 cma = folio_test_hugetlb_cma(folio);
3981
3982 list_del(&folio->lru);
3983
3984 split_page_owner(&folio->page, huge_page_order(src), huge_page_order(dst));
3985 pgalloc_tag_split(folio, huge_page_order(src), huge_page_order(dst));
3986
3987 for (i = 0; i < pages_per_huge_page(src); i += pages_per_huge_page(dst)) {
3988 struct page *page = folio_page(folio, i);
3989 /* Careful: see __split_huge_page_tail() */
3990 struct folio *new_folio = (struct folio *)page;
3991
3992 clear_compound_head(page);
3993 prep_compound_page(page, dst->order);
3994
3995 new_folio->mapping = NULL;
3996 init_new_hugetlb_folio(new_folio);
3997 /* Copy the CMA flag so that it is freed correctly */
3998 if (cma)
3999 folio_set_hugetlb_cma(new_folio);
4000 list_add(&new_folio->lru, &dst_list);
4001 }
4002 }
4003
4004 prep_and_add_allocated_folios(dst, &dst_list);
4005
4006 mutex_unlock(&dst->resize_lock);
4007
4008 return rc;
4009 }
4010
4011 long demote_pool_huge_page(struct hstate *src, nodemask_t *nodes_allowed,
4012 unsigned long nr_to_demote)
4013 __must_hold(&hugetlb_lock)
4014 {
4015 int nr_nodes, node;
4016 struct hstate *dst;
4017 long rc = 0;
4018 long nr_demoted = 0;
4019
4020 lockdep_assert_held(&hugetlb_lock);
4021
4022 /* We should never get here if no demote order */
4023 if (!src->demote_order) {
4024 pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n");
4025 return -EINVAL; /* internal error */
4026 }
4027 dst = size_to_hstate(PAGE_SIZE << src->demote_order);
4028
4029 for_each_node_mask_to_free(src, nr_nodes, node, nodes_allowed) {
4030 LIST_HEAD(list);
4031 struct folio *folio, *next;
4032
4033 list_for_each_entry_safe(folio, next, &src->hugepage_freelists[node], lru) {
4034 if (folio_test_hwpoison(folio))
4035 continue;
4036
4037 remove_hugetlb_folio(src, folio, false);
4038 list_add(&folio->lru, &list);
4039
4040 if (++nr_demoted == nr_to_demote)
4041 break;
4042 }
4043
4044 spin_unlock_irq(&hugetlb_lock);
4045
4046 rc = demote_free_hugetlb_folios(src, dst, &list);
4047
4048 spin_lock_irq(&hugetlb_lock);
4049
4050 list_for_each_entry_safe(folio, next, &list, lru) {
4051 list_del(&folio->lru);
4052 add_hugetlb_folio(src, folio, false);
4053
4054 nr_demoted--;
4055 }
4056
4057 if (rc < 0 || nr_demoted == nr_to_demote)
4058 break;
4059 }
4060
4061 /*
4062 * Not absolutely necessary, but for consistency update max_huge_pages
4063 * based on pool changes for the demoted page.
4064 */
4065 src->max_huge_pages -= nr_demoted;
4066 dst->max_huge_pages += nr_demoted << (huge_page_order(src) - huge_page_order(dst));
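/*
 * For instance, on x86-64 with 4 KiB base pages, demoting one 1 GiB page
 * (order 18) to 2 MiB pages (order 9) adds 1 << (18 - 9) = 512 pages to
 * the destination hstate's max_huge_pages.
 */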
4067
4068 if (rc < 0)
4069 return rc;
4070
4071 if (nr_demoted)
4072 return nr_demoted;
4073 /*
4074 * Only way to get here is if all pages on free lists are poisoned.
4075 * Return -EBUSY so that caller will not retry.
4076 */
4077 return -EBUSY;
4078 }
4079
4080 ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
4081 struct hstate *h, int nid,
4082 unsigned long count, size_t len)
4083 {
4084 int err;
4085 nodemask_t nodes_allowed, *n_mask;
4086
4087 if (hstate_is_gigantic_no_runtime(h))
4088 return -EINVAL;
4089
4090 if (nid == NUMA_NO_NODE) {
4091 /*
4092 * global hstate attribute
4093 */
4094 if (!(obey_mempolicy &&
4095 init_nodemask_of_mempolicy(&nodes_allowed)))
4096 n_mask = &node_states[N_MEMORY];
4097 else
4098 n_mask = &nodes_allowed;
4099 } else {
4100 /*
4101 * Node specific request. count adjustment happens in
4102 * set_max_huge_pages() after acquiring hugetlb_lock.
4103 */
4104 init_nodemask_of_node(&nodes_allowed, nid);
4105 n_mask = &nodes_allowed;
4106 }
4107
4108 err = set_max_huge_pages(h, count, nid, n_mask);
4109
4110 return err ? err : len;
4111 }
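/*
 * This is the common backend for sysfs writes such as
 *   echo 512 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 * and for the per-node variant under /sys/devices/system/node/nodeN/hugepages/,
 * where nid identifies the node and count is the parsed value.
 */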
4112
4113 static int __init hugetlb_init(void)
4114 {
4115 int i;
4116
4117 BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE <
4118 __NR_HPAGEFLAGS);
4119 BUILD_BUG_ON_INVALID(HUGETLB_PAGE_ORDER > MAX_FOLIO_ORDER);
4120
4121 if (!hugepages_supported()) {
4122 if (hugetlb_max_hstate || default_hstate_max_huge_pages)
4123 pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n");
4124 return 0;
4125 }
4126
4127 /*
4128 * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists. Some
4129 * architectures depend on setup being done here.
4130 */
4131 hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
4132 if (!parsed_default_hugepagesz) {
4133 /*
4134 * If we did not parse a default huge page size, set
4135 * default_hstate_idx to HPAGE_SIZE hstate. And, if the
4136 * number of huge pages for this default size was implicitly
4137 * specified, set that here as well.
4138 * Note that the implicit setting will overwrite an explicit
4139 * setting. A warning will be printed in this case.
4140 */
4141 default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE));
4142 if (default_hstate_max_huge_pages) {
4143 if (default_hstate.max_huge_pages) {
4144 char buf[32];
4145
4146 string_get_size(huge_page_size(&default_hstate),
4147 1, STRING_UNITS_2, buf, 32);
4148 pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
4149 default_hstate.max_huge_pages, buf);
4150 pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n",
4151 default_hstate_max_huge_pages);
4152 }
4153 default_hstate.max_huge_pages =
4154 default_hstate_max_huge_pages;
4155
4156 for_each_online_node(i)
4157 default_hstate.max_huge_pages_node[i] =
4158 default_hugepages_in_node[i];
4159 }
4160 }
4161
4162 hugetlb_cma_check();
4163 hugetlb_init_hstates();
4164 gather_bootmem_prealloc();
4165 report_hugepages();
4166
4167 hugetlb_sysfs_init();
4168 hugetlb_cgroup_file_init();
4169 hugetlb_sysctl_init();
4170
4171 #ifdef CONFIG_SMP
4172 num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
4173 #else
4174 num_fault_mutexes = 1;
4175 #endif
4176 hugetlb_fault_mutex_table =
4177 kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
4178 GFP_KERNEL);
4179 BUG_ON(!hugetlb_fault_mutex_table);
4180
4181 for (i = 0; i < num_fault_mutexes; i++)
4182 mutex_init(&hugetlb_fault_mutex_table[i]);
4183 return 0;
4184 }
4185 subsys_initcall(hugetlb_init);
4186
4187 /* Overwritten by architectures with more huge page sizes */
4188 bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size)
4189 {
4190 return size == HPAGE_SIZE;
4191 }
4192
4193 void __init hugetlb_add_hstate(unsigned int order)
4194 {
4195 struct hstate *h;
4196 unsigned long i;
4197
4198 if (size_to_hstate(PAGE_SIZE << order)) {
4199 return;
4200 }
4201 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
4202 BUG_ON(order < order_base_2(__NR_USED_SUBPAGE));
4203 WARN_ON(order > MAX_FOLIO_ORDER);
4204 h = &hstates[hugetlb_max_hstate++];
4205 __mutex_init(&h->resize_lock, "resize mutex", &h->resize_key);
4206 h->order = order;
4207 h->mask = ~(huge_page_size(h) - 1);
4208 for (i = 0; i < MAX_NUMNODES; ++i)
4209 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
4210 INIT_LIST_HEAD(&h->hugepage_activelist);
4211 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
4212 huge_page_size(h)/SZ_1K);
4213
4214 parsed_hstate = h;
4215 }
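/*
 * For example, hugetlb_add_hstate(9) on x86-64 (4 KiB base pages) creates
 * the 2 MiB hstate: huge_page_size() is 2 MiB, h->mask is ~(2 MiB - 1) and
 * the name becomes "hugepages-2048kB".
 */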
4216
4217 bool __init __weak hugetlb_node_alloc_supported(void)
4218 {
4219 return true;
4220 }
4221
4222 static void __init hugepages_clear_pages_in_node(void)
4223 {
4224 if (!hugetlb_max_hstate) {
4225 default_hstate_max_huge_pages = 0;
4226 memset(default_hugepages_in_node, 0,
4227 sizeof(default_hugepages_in_node));
4228 } else {
4229 parsed_hstate->max_huge_pages = 0;
4230 memset(parsed_hstate->max_huge_pages_node, 0,
4231 sizeof(parsed_hstate->max_huge_pages_node));
4232 }
4233 }
4234
4235 static __init int hugetlb_add_param(char *s, int (*setup)(char *))
4236 {
4237 size_t len;
4238 char *p;
4239
4240 if (hugetlb_param_index >= HUGE_MAX_CMDLINE_ARGS)
4241 return -EINVAL;
4242
4243 len = strlen(s) + 1;
4244 if (len + hstate_cmdline_index > sizeof(hstate_cmdline_buf))
4245 return -EINVAL;
4246
4247 p = &hstate_cmdline_buf[hstate_cmdline_index];
4248 memcpy(p, s, len);
4249 hstate_cmdline_index += len;
4250
4251 hugetlb_params[hugetlb_param_index].val = p;
4252 hugetlb_params[hugetlb_param_index].setup = setup;
4253
4254 hugetlb_param_index++;
4255
4256 return 0;
4257 }
4258
4259 static __init void hugetlb_parse_params(void)
4260 {
4261 int i;
4262 struct hugetlb_cmdline *hcp;
4263
4264 for (i = 0; i < hugetlb_param_index; i++) {
4265 hcp = &hugetlb_params[i];
4266
4267 hcp->setup(hcp->val);
4268 }
4269
4270 hugetlb_cma_validate_params();
4271 }
4272
4273 /*
4274 * hugepages command line processing
4275 * hugepages normally follows a valid hugepagesz or default_hugepagesz
4276 * specification. If not, ignore the hugepages value. hugepages can also
4277 * be the first huge page command line option in which case it implicitly
4278 * specifies the number of huge pages for the default size.
4279 */
4280 static int __init hugepages_setup(char *s)
4281 {
4282 unsigned long *mhp;
4283 static unsigned long *last_mhp;
4284 int node = NUMA_NO_NODE;
4285 int count;
4286 unsigned long tmp;
4287 char *p = s;
4288
4289 if (!parsed_valid_hugepagesz) {
4290 pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
4291 parsed_valid_hugepagesz = true;
4292 return -EINVAL;
4293 }
4294
4295 /*
4296 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
4297 * yet, so this hugepages= parameter goes to the "default hstate".
4298 * Otherwise, it goes with the previously parsed hugepagesz or
4299 * default_hugepagesz.
4300 */
4301 else if (!hugetlb_max_hstate)
4302 mhp = &default_hstate_max_huge_pages;
4303 else
4304 mhp = &parsed_hstate->max_huge_pages;
4305
4306 if (mhp == last_mhp) {
4307 pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s);
4308 return 1;
4309 }
4310
4311 while (*p) {
4312 count = 0;
4313 if (sscanf(p, "%lu%n", &tmp, &count) != 1)
4314 goto invalid;
4315 /* Parameter is node format */
4316 if (p[count] == ':') {
4317 if (!hugetlb_node_alloc_supported()) {
4318 pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n");
4319 return 1;
4320 }
4321 if (tmp >= MAX_NUMNODES || !node_online(tmp))
4322 goto invalid;
4323 node = array_index_nospec(tmp, MAX_NUMNODES);
4324 p += count + 1;
4325 /* Parse hugepages */
4326 if (sscanf(p, "%lu%n", &tmp, &count) != 1)
4327 goto invalid;
4328 if (!hugetlb_max_hstate)
4329 default_hugepages_in_node[node] = tmp;
4330 else
4331 parsed_hstate->max_huge_pages_node[node] = tmp;
4332 *mhp += tmp;
4333 /* Go to parse next node */
4334 if (p[count] == ',')
4335 p += count + 1;
4336 else
4337 break;
4338 } else {
4339 if (p != s)
4340 goto invalid;
4341 *mhp = tmp;
4342 break;
4343 }
4344 }
4345
4346 last_mhp = mhp;
4347
4348 return 0;
4349
4350 invalid:
4351 pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p);
4352 hugepages_clear_pages_in_node();
4353 return -EINVAL;
4354 }
4355 hugetlb_early_param("hugepages", hugepages_setup);
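/*
 * Example command lines accepted by the parser above:
 *   hugepages=1024                        1024 pages of the default size
 *   hugepagesz=1G hugepages=4             4 gigantic pages of 1 GiB
 *   hugepagesz=2M hugepages=0:256,1:256   node-specific allocation of
 *                                         256 pages each on nodes 0 and 1
 */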
4356
4357 /*
4358 * hugepagesz command line processing
4359 * A specific huge page size can only be specified once with hugepagesz.
4360 * hugepagesz is followed by hugepages on the command line. The global
4361 * variable 'parsed_valid_hugepagesz' is used to determine if prior
4362 * hugepagesz argument was valid.
4363 */
4364 static int __init hugepagesz_setup(char *s)
4365 {
4366 unsigned long size;
4367 struct hstate *h;
4368
4369 parsed_valid_hugepagesz = false;
4370 size = (unsigned long)memparse(s, NULL);
4371
4372 if (!arch_hugetlb_valid_size(size)) {
4373 pr_err("HugeTLB: unsupported hugepagesz=%s\n", s);
4374 return -EINVAL;
4375 }
4376
4377 h = size_to_hstate(size);
4378 if (h) {
4379 /*
4380 * hstate for this size already exists. This is normally
4381 * an error, but is allowed if the existing hstate is the
4382 * default hstate. More specifically, it is only allowed if
4383 * the number of huge pages for the default hstate was not
4384 * previously specified.
4385 */
4386 if (!parsed_default_hugepagesz || h != &default_hstate ||
4387 default_hstate.max_huge_pages) {
4388 pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s);
4389 return -EINVAL;
4390 }
4391
4392 /*
4393 * No need to call hugetlb_add_hstate() as hstate already
4394 * exists. But, do set parsed_hstate so that a following
4395 * hugepages= parameter will be applied to this hstate.
4396 */
4397 parsed_hstate = h;
4398 parsed_valid_hugepagesz = true;
4399 return 0;
4400 }
4401
4402 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
4403 parsed_valid_hugepagesz = true;
4404 return 0;
4405 }
4406 hugetlb_early_param("hugepagesz", hugepagesz_setup);
4407
4408 /*
4409 * default_hugepagesz command line input
4410 * Only one instance of default_hugepagesz allowed on command line.
4411 */
4412 static int __init default_hugepagesz_setup(char *s)
4413 {
4414 unsigned long size;
4415 int i;
4416
4417 parsed_valid_hugepagesz = false;
4418 if (parsed_default_hugepagesz) {
4419 pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);
4420 return -EINVAL;
4421 }
4422
4423 size = (unsigned long)memparse(s, NULL);
4424
4425 if (!arch_hugetlb_valid_size(size)) {
4426 pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s);
4427 return -EINVAL;
4428 }
4429
4430 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
4431 parsed_valid_hugepagesz = true;
4432 parsed_default_hugepagesz = true;
4433 default_hstate_idx = hstate_index(size_to_hstate(size));
4434
4435 /*
4436 * The number of default huge pages (for this size) could have been
4437 * specified as the first hugetlb parameter: hugepages=X. If so,
4438 * then default_hstate_max_huge_pages is set. If the default huge
4439 * page size is gigantic (> MAX_PAGE_ORDER), then the pages must be
4440 * allocated here from bootmem allocator.
4441 */
4442 if (default_hstate_max_huge_pages) {
4443 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
4444 /*
4445 * Since this is an early parameter, we can't check
4446 * NUMA node state yet, so loop through MAX_NUMNODES.
4447 */
4448 for (i = 0; i < MAX_NUMNODES; i++) {
4449 if (default_hugepages_in_node[i] != 0)
4450 default_hstate.max_huge_pages_node[i] =
4451 default_hugepages_in_node[i];
4452 }
4453 default_hstate_max_huge_pages = 0;
4454 }
4455
4456 return 0;
4457 }
4458 hugetlb_early_param("default_hugepagesz", default_hugepagesz_setup);
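/*
 * For example, booting with "default_hugepagesz=1G hugepages=16" selects
 * the 1 GiB hstate as the default and requests 16 such pages at boot,
 * while a bare "hugepages=16" with no size parameter applies to HPAGE_SIZE
 * via default_hstate_max_huge_pages.
 */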
4459
4460 void __init hugetlb_bootmem_set_nodes(void)
4461 {
4462 int i, nid;
4463 unsigned long start_pfn, end_pfn;
4464
4465 if (!nodes_empty(hugetlb_bootmem_nodes))
4466 return;
4467
4468 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
4469 if (end_pfn > start_pfn)
4470 node_set(nid, hugetlb_bootmem_nodes);
4471 }
4472 }
4473
4474 static bool __hugetlb_bootmem_allocated __initdata;
4475
4476 bool __init hugetlb_bootmem_allocated(void)
4477 {
4478 return __hugetlb_bootmem_allocated;
4479 }
4480
4481 void __init hugetlb_bootmem_alloc(void)
4482 {
4483 struct hstate *h;
4484 int i;
4485
4486 if (__hugetlb_bootmem_allocated)
4487 return;
4488
4489 hugetlb_bootmem_set_nodes();
4490
4491 for (i = 0; i < MAX_NUMNODES; i++)
4492 INIT_LIST_HEAD(&huge_boot_pages[i]);
4493
4494 hugetlb_parse_params();
4495
4496 for_each_hstate(h) {
4497 h->next_nid_to_alloc = first_online_node;
4498
4499 if (hstate_is_gigantic(h))
4500 hugetlb_hstate_alloc_pages(h);
4501 }
4502
4503 __hugetlb_bootmem_allocated = true;
4504 }
4505
4506 /*
4507 * hugepage_alloc_threads command line parsing.
4508 *
4509 * When set, use this specific number of threads for the boot
4510 * allocation of hugepages.
4511 */
4512 static int __init hugepage_alloc_threads_setup(char *s)
4513 {
4514 unsigned long allocation_threads;
4515
4516 if (kstrtoul(s, 0, &allocation_threads) != 0)
4517 return 1;
4518
4519 if (allocation_threads == 0)
4520 return 1;
4521
4522 hugepage_allocation_threads = allocation_threads;
4523
4524 return 1;
4525 }
4526 __setup("hugepage_alloc_threads=", hugepage_alloc_threads_setup);
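/*
 * Example: "hugepage_alloc_threads=16" on the kernel command line uses 16
 * worker threads for the boot-time pool allocation instead of the default
 * of one quarter of the online CPUs.
 */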
4527
4528 static unsigned int allowed_mems_nr(struct hstate *h)
4529 {
4530 int node;
4531 unsigned int nr = 0;
4532 nodemask_t *mbind_nodemask;
4533 unsigned int *array = h->free_huge_pages_node;
4534 gfp_t gfp_mask = htlb_alloc_mask(h);
4535
4536 mbind_nodemask = policy_mbind_nodemask(gfp_mask);
4537 for_each_node_mask(node, cpuset_current_mems_allowed) {
4538 if (!mbind_nodemask || node_isset(node, *mbind_nodemask))
4539 nr += array[node];
4540 }
4541
4542 return nr;
4543 }
4544
4545 void hugetlb_report_meminfo(struct seq_file *m)
4546 {
4547 struct hstate *h;
4548 unsigned long total = 0;
4549
4550 if (!hugepages_supported())
4551 return;
4552
4553 for_each_hstate(h) {
4554 unsigned long count = h->nr_huge_pages;
4555
4556 total += huge_page_size(h) * count;
4557
4558 if (h == &default_hstate)
4559 seq_printf(m,
4560 "HugePages_Total: %5lu\n"
4561 "HugePages_Free: %5lu\n"
4562 "HugePages_Rsvd: %5lu\n"
4563 "HugePages_Surp: %5lu\n"
4564 "Hugepagesize: %8lu kB\n",
4565 count,
4566 h->free_huge_pages,
4567 h->resv_huge_pages,
4568 h->surplus_huge_pages,
4569 huge_page_size(h) / SZ_1K);
4570 }
4571
4572 seq_printf(m, "Hugetlb: %8lu kB\n", total / SZ_1K);
4573 }
4574
4575 int hugetlb_report_node_meminfo(char *buf, int len, int nid)
4576 {
4577 struct hstate *h = &default_hstate;
4578
4579 if (!hugepages_supported())
4580 return 0;
4581
4582 return sysfs_emit_at(buf, len,
4583 "Node %d HugePages_Total: %5u\n"
4584 "Node %d HugePages_Free: %5u\n"
4585 "Node %d HugePages_Surp: %5u\n",
4586 nid, h->nr_huge_pages_node[nid],
4587 nid, h->free_huge_pages_node[nid],
4588 nid, h->surplus_huge_pages_node[nid]);
4589 }
4590
4591 void hugetlb_show_meminfo_node(int nid)
4592 {
4593 struct hstate *h;
4594
4595 if (!hugepages_supported())
4596 return;
4597
4598 for_each_hstate(h)
4599 printk("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
4600 nid,
4601 h->nr_huge_pages_node[nid],
4602 h->free_huge_pages_node[nid],
4603 h->surplus_huge_pages_node[nid],
4604 huge_page_size(h) / SZ_1K);
4605 }
4606
4607 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
4608 {
4609 seq_printf(m, "HugetlbPages:\t%8lu kB\n",
4610 K(atomic_long_read(&mm->hugetlb_usage)));
4611 }
4612
4613 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
4614 unsigned long hugetlb_total_pages(void)
4615 {
4616 struct hstate *h;
4617 unsigned long nr_total_pages = 0;
4618
4619 for_each_hstate(h)
4620 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
4621 return nr_total_pages;
4622 }
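/*
 * Example: a pool of 16 x 2 MiB pages and 2 x 1 GiB pages reports
 * 16 * 512 + 2 * 262144 = 532480 base pages here (assuming 4 KiB PAGE_SIZE).
 */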
4623
4624 static int hugetlb_acct_memory(struct hstate *h, long delta)
4625 {
4626 int ret = -ENOMEM;
4627
4628 if (!delta)
4629 return 0;
4630
4631 spin_lock_irq(&hugetlb_lock);
4632 /*
4633 * When cpuset is configured, it breaks the strict hugetlb page
4634 * reservation as the accounting is done on a global variable. Such
4635 * reservation is completely rubbish in the presence of cpuset because
4636 * the reservation is not checked against page availability for the
4637 * current cpuset. Application can still potentially OOM'ed by kernel
4638 * with lack of free htlb page in cpuset that the task is in.
4639 * Attempt to enforce strict accounting with cpuset is almost
4640 * impossible (or too ugly) because cpuset is so fluid that
4641 * a task or memory node can be dynamically moved between cpusets.
4642 *
4643 * The change of semantics for shared hugetlb mapping with cpuset is
4644 * undesirable. However, in order to preserve some of the semantics,
4645 * we fall back to check against current free page availability as
4646 * a best attempt and hopefully to minimize the impact of changing
4647 * semantics that cpuset has.
4648 *
4649 * Apart from cpuset, we also have memory policy mechanism that
4650 * also determines from which node the kernel will allocate memory
4651 * in a NUMA system. So similar to cpuset, we also should consider
4652 * the memory policy of the current task. Similar to the description
4653 * above.
4654 */
4655 if (delta > 0) {
4656 if (gather_surplus_pages(h, delta) < 0)
4657 goto out;
4658
4659 if (delta > allowed_mems_nr(h)) {
4660 return_unused_surplus_pages(h, delta);
4661 goto out;
4662 }
4663 }
4664
4665 ret = 0;
4666 if (delta < 0)
4667 return_unused_surplus_pages(h, (unsigned long) -delta);
4668
4669 out:
4670 spin_unlock_irq(&hugetlb_lock);
4671 return ret;
4672 }
4673
4674 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
4675 {
4676 struct resv_map *resv = vma_resv_map(vma);
4677
4678 /*
4679 * HPAGE_RESV_OWNER indicates a private mapping.
4680 * This new VMA should share its siblings reservation map if present.
4681 * The VMA will only ever have a valid reservation map pointer where
4682 * it is being copied for another still existing VMA. As that VMA
4683 * has a reference to the reservation map it cannot disappear until
4684 * after this open call completes. It is therefore safe to take a
4685 * new reference here without additional locking.
4686 */
4687 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
4688 resv_map_dup_hugetlb_cgroup_uncharge_info(resv);
4689 kref_get(&resv->refs);
4690 }
4691
4692 /*
4693 * vma_lock structure for sharable mappings is vma specific.
4694 * Clear old pointer (if copied via vm_area_dup) and allocate
4695 * new structure. Before clearing, make sure vma_lock is not
4696 * for this vma.
4697 */
4698 if (vma->vm_flags & VM_MAYSHARE) {
4699 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
4700
4701 if (vma_lock) {
4702 if (vma_lock->vma != vma) {
4703 vma->vm_private_data = NULL;
4704 hugetlb_vma_lock_alloc(vma);
4705 } else
4706 pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__);
4707 } else
4708 hugetlb_vma_lock_alloc(vma);
4709 }
4710 }
4711
4712 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
4713 {
4714 struct hstate *h = hstate_vma(vma);
4715 struct resv_map *resv;
4716 struct hugepage_subpool *spool = subpool_vma(vma);
4717 unsigned long reserve, start, end;
4718 long gbl_reserve;
4719
4720 hugetlb_vma_lock_free(vma);
4721
4722 resv = vma_resv_map(vma);
4723 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4724 return;
4725
4726 start = vma_hugecache_offset(h, vma, vma->vm_start);
4727 end = vma_hugecache_offset(h, vma, vma->vm_end);
4728
4729 reserve = (end - start) - region_count(resv, start, end);
4730 hugetlb_cgroup_uncharge_counter(resv, start, end);
4731 if (reserve) {
4732 /*
4733 * Decrement reserve counts. The global reserve count may be
4734 * adjusted if the subpool has a minimum size.
4735 */
4736 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
4737 hugetlb_acct_memory(h, -gbl_reserve);
4738 }
4739
4740 kref_put(&resv->refs, resv_map_release);
4741 }
4742
4743 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
4744 {
4745 if (addr & ~(huge_page_mask(hstate_vma(vma))))
4746 return -EINVAL;
4747 return 0;
4748 }
4749
4750 void hugetlb_split(struct vm_area_struct *vma, unsigned long addr)
4751 {
4752 /*
4753 * PMD sharing is only possible for PUD_SIZE-aligned address ranges
4754 * in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this
4755 * split, unshare PMDs in the PUD_SIZE interval surrounding addr now.
4756 * This function is called in the middle of a VMA split operation, with
4757 * MM, VMA and rmap all write-locked to prevent concurrent page table
4758 * walks (except hardware and gup_fast()).
4759 */
4760 vma_assert_write_locked(vma);
4761 i_mmap_assert_write_locked(vma->vm_file->f_mapping);
4762
4763 if (addr & ~PUD_MASK) {
4764 unsigned long floor = addr & PUD_MASK;
4765 unsigned long ceil = floor + PUD_SIZE;
4766
4767 if (floor >= vma->vm_start && ceil <= vma->vm_end) {
4768 /*
4769 * Locking:
4770 * Use take_locks=false here.
4771 * The file rmap lock is already held.
4772 * The hugetlb VMA lock can't be taken when we already
4773 * hold the file rmap lock, and we don't need it because
4774 * its purpose is to synchronize against concurrent page
4775 * table walks, which are not possible thanks to the
4776 * locks held by our caller.
4777 */
4778 hugetlb_unshare_pmds(vma, floor, ceil, /* take_locks = */ false);
4779 }
4780 }
4781 }
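/*
 * Example of the alignment check above: with PUD_SIZE = 1 GiB, splitting a
 * VMA at 0x40200000 lands inside the PUD range [0x40000000, 0x80000000),
 * so PMDs are unshared across that whole 1 GiB interval before the split,
 * provided the range lies within the VMA.
 */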
4782
4783 static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
4784 {
4785 return huge_page_size(hstate_vma(vma));
4786 }
4787
4788 /*
4789 * We cannot handle pagefaults against hugetlb pages at all. They cause
4790 * handle_mm_fault() to try to instantiate regular-sized pages in the
4791 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
4792 * this far.
4793 */
4794 static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
4795 {
4796 BUG();
4797 return 0;
4798 }
4799
4800 /*
4801 * When a new function is introduced to vm_operations_struct and added
4802 * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
4803 * This is because under System V memory model, mappings created via
4804 * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
4805 * their original vm_ops are overwritten with shm_vm_ops.
4806 */
4807 const struct vm_operations_struct hugetlb_vm_ops = {
4808 .fault = hugetlb_vm_op_fault,
4809 .open = hugetlb_vm_op_open,
4810 .close = hugetlb_vm_op_close,
4811 .may_split = hugetlb_vm_op_split,
4812 .pagesize = hugetlb_vm_op_pagesize,
4813 };
4814
4815 static pte_t make_huge_pte(struct vm_area_struct *vma, struct folio *folio,
4816 bool try_mkwrite)
4817 {
4818 pte_t entry = folio_mk_pte(folio, vma->vm_page_prot);
4819 unsigned int shift = huge_page_shift(hstate_vma(vma));
4820
4821 if (try_mkwrite && (vma->vm_flags & VM_WRITE)) {
4822 entry = pte_mkwrite_novma(pte_mkdirty(entry));
4823 } else {
4824 entry = pte_wrprotect(entry);
4825 }
4826 entry = pte_mkyoung(entry);
4827 entry = arch_make_huge_pte(entry, shift, vma->vm_flags);
4828
4829 return entry;
4830 }
4831
4832 static void set_huge_ptep_writable(struct vm_area_struct *vma,
4833 unsigned long address, pte_t *ptep)
4834 {
4835 pte_t entry;
4836
4837 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(vma->vm_mm, address, ptep)));
4838 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
4839 update_mmu_cache(vma, address, ptep);
4840 }
4841
4842 static void set_huge_ptep_maybe_writable(struct vm_area_struct *vma,
4843 unsigned long address, pte_t *ptep)
4844 {
4845 if (vma->vm_flags & VM_WRITE)
4846 set_huge_ptep_writable(vma, address, ptep);
4847 }
4848
4849 static void
4850 hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
4851 struct folio *new_folio, pte_t old, unsigned long sz)
4852 {
4853 pte_t newpte = make_huge_pte(vma, new_folio, true);
4854
4855 __folio_mark_uptodate(new_folio);
4856 hugetlb_add_new_anon_rmap(new_folio, vma, addr);
4857 if (userfaultfd_wp(vma) && huge_pte_uffd_wp(old))
4858 newpte = huge_pte_mkuffd_wp(newpte);
4859 set_huge_pte_at(vma->vm_mm, addr, ptep, newpte, sz);
4860 hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm);
4861 folio_set_hugetlb_migratable(new_folio);
4862 }
4863
4864 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
4865 struct vm_area_struct *dst_vma,
4866 struct vm_area_struct *src_vma)
4867 {
4868 pte_t *src_pte, *dst_pte, entry;
4869 struct folio *pte_folio;
4870 unsigned long addr;
4871 bool cow = is_cow_mapping(src_vma->vm_flags);
4872 struct hstate *h = hstate_vma(src_vma);
4873 unsigned long sz = huge_page_size(h);
4874 unsigned long npages = pages_per_huge_page(h);
4875 struct mmu_notifier_range range;
4876 unsigned long last_addr_mask;
4877 softleaf_t softleaf;
4878 int ret = 0;
4879
4880 if (cow) {
4881 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src,
4882 src_vma->vm_start,
4883 src_vma->vm_end);
4884 mmu_notifier_invalidate_range_start(&range);
4885 vma_assert_write_locked(src_vma);
4886 raw_write_seqcount_begin(&src->write_protect_seq);
4887 } else {
4888 /*
4889 * For shared mappings the vma lock must be held before
4890 * calling hugetlb_walk() in the src vma. Otherwise, the
4891 * returned ptep could go away if part of a shared pmd and
4892 * another thread calls huge_pmd_unshare.
4893 */
4894 hugetlb_vma_lock_read(src_vma);
4895 }
4896
4897 last_addr_mask = hugetlb_mask_last_page(h);
4898 for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) {
4899 spinlock_t *src_ptl, *dst_ptl;
4900 src_pte = hugetlb_walk(src_vma, addr, sz);
4901 if (!src_pte) {
4902 addr |= last_addr_mask;
4903 continue;
4904 }
4905 dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz);
4906 if (!dst_pte) {
4907 ret = -ENOMEM;
4908 break;
4909 }
4910
4911 #ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
4912 /* If the pagetables are shared, there is nothing to do */
4913 if (ptdesc_pmd_is_shared(virt_to_ptdesc(dst_pte))) {
4914 addr |= last_addr_mask;
4915 continue;
4916 }
4917 #endif
4918
4919 dst_ptl = huge_pte_lock(h, dst, dst_pte);
4920 src_ptl = huge_pte_lockptr(h, src, src_pte);
4921 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
4922 entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte);
4923 again:
4924 if (huge_pte_none(entry)) {
4925 /* Skip if src entry none. */
4926 goto next;
4927 }
4928
4929 softleaf = softleaf_from_pte(entry);
4930 if (unlikely(softleaf_is_hwpoison(softleaf))) {
4931 if (!userfaultfd_wp(dst_vma))
4932 entry = huge_pte_clear_uffd_wp(entry);
4933 set_huge_pte_at(dst, addr, dst_pte, entry, sz);
4934 } else if (unlikely(softleaf_is_migration(softleaf))) {
4935 bool uffd_wp = pte_swp_uffd_wp(entry);
4936
4937 if (!softleaf_is_migration_read(softleaf) && cow) {
4938 /*
4939 * COW mappings require pages in both
4940 * parent and child to be set to read.
4941 */
4942 softleaf = make_readable_migration_entry(
4943 swp_offset(softleaf));
4944 entry = swp_entry_to_pte(softleaf);
4945 if (userfaultfd_wp(src_vma) && uffd_wp)
4946 entry = pte_swp_mkuffd_wp(entry);
4947 set_huge_pte_at(src, addr, src_pte, entry, sz);
4948 }
4949 if (!userfaultfd_wp(dst_vma))
4950 entry = huge_pte_clear_uffd_wp(entry);
4951 set_huge_pte_at(dst, addr, dst_pte, entry, sz);
4952 } else if (unlikely(pte_is_marker(entry))) {
4953 const pte_marker marker = copy_pte_marker(softleaf, dst_vma);
4954
4955 if (marker)
4956 set_huge_pte_at(dst, addr, dst_pte,
4957 make_pte_marker(marker), sz);
4958 } else {
4959 entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte);
4960 pte_folio = page_folio(pte_page(entry));
4961 folio_get(pte_folio);
4962
4963 /*
4964 * Failing to duplicate the anon rmap is a rare case
4965 * where we see pinned hugetlb pages while they're
4966 * prone to COW. We need to do the COW earlier during
4967 * fork.
4968 *
4969 * When pre-allocating the page or copying data, we
4970 * need to be without the pgtable locks since we could
4971 * sleep during the process.
4972 */
4973 if (!folio_test_anon(pte_folio)) {
4974 hugetlb_add_file_rmap(pte_folio);
4975 } else if (hugetlb_try_dup_anon_rmap(pte_folio, src_vma)) {
4976 pte_t src_pte_old = entry;
4977 struct folio *new_folio;
4978
4979 spin_unlock(src_ptl);
4980 spin_unlock(dst_ptl);
4981 /* Do not use reserve as it's private owned */
4982 new_folio = alloc_hugetlb_folio(dst_vma, addr, false);
4983 if (IS_ERR(new_folio)) {
4984 folio_put(pte_folio);
4985 ret = PTR_ERR(new_folio);
4986 break;
4987 }
4988 ret = copy_user_large_folio(new_folio, pte_folio,
4989 addr, dst_vma);
4990 folio_put(pte_folio);
4991 if (ret) {
4992 folio_put(new_folio);
4993 break;
4994 }
4995
4996 /* Install the new hugetlb folio if src pte stable */
4997 dst_ptl = huge_pte_lock(h, dst, dst_pte);
4998 src_ptl = huge_pte_lockptr(h, src, src_pte);
4999 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
5000 entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte);
5001 if (!pte_same(src_pte_old, entry)) {
5002 restore_reserve_on_error(h, dst_vma, addr,
5003 new_folio);
5004 folio_put(new_folio);
5005 /* huge_ptep of dst_pte won't change as in child */
5006 goto again;
5007 }
5008 hugetlb_install_folio(dst_vma, dst_pte, addr,
5009 new_folio, src_pte_old, sz);
5010 goto next;
5011 }
5012
5013 if (cow) {
5014 /*
5015 * No need to notify as we are downgrading page
5016 * table protection not changing it to point
5017 * to a new page.
5018 *
5019 * See Documentation/mm/mmu_notifier.rst
5020 */
5021 huge_ptep_set_wrprotect(src, addr, src_pte);
5022 entry = huge_pte_wrprotect(entry);
5023 }
5024
5025 if (!userfaultfd_wp(dst_vma))
5026 entry = huge_pte_clear_uffd_wp(entry);
5027
5028 set_huge_pte_at(dst, addr, dst_pte, entry, sz);
5029 hugetlb_count_add(npages, dst);
5030 }
5031
5032 next:
5033 spin_unlock(src_ptl);
5034 spin_unlock(dst_ptl);
5035 }
5036
5037 if (cow) {
5038 raw_write_seqcount_end(&src->write_protect_seq);
5039 mmu_notifier_invalidate_range_end(&range);
5040 } else {
5041 hugetlb_vma_unlock_read(src_vma);
5042 }
5043
5044 return ret;
5045 }
5046
5047 static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
5048 unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte,
5049 unsigned long sz)
5050 {
5051 bool need_clear_uffd_wp = vma_has_uffd_without_event_remap(vma);
5052 struct hstate *h = hstate_vma(vma);
5053 struct mm_struct *mm = vma->vm_mm;
5054 spinlock_t *src_ptl, *dst_ptl;
5055 pte_t pte;
5056
5057 dst_ptl = huge_pte_lock(h, mm, dst_pte);
5058 src_ptl = huge_pte_lockptr(h, mm, src_pte);
5059
5060 /*
5061 * We don't have to worry about the ordering of src and dst ptlocks
5062 * because exclusive mmap_lock (or the i_mmap_lock) prevents deadlock.
5063 */
5064 if (src_ptl != dst_ptl)
5065 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
5066
5067 pte = huge_ptep_get_and_clear(mm, old_addr, src_pte, sz);
5068
5069 if (need_clear_uffd_wp && pte_is_uffd_wp_marker(pte)) {
5070 huge_pte_clear(mm, new_addr, dst_pte, sz);
5071 } else {
5072 if (need_clear_uffd_wp) {
5073 if (pte_present(pte))
5074 pte = huge_pte_clear_uffd_wp(pte);
5075 else
5076 pte = pte_swp_clear_uffd_wp(pte);
5077 }
5078 set_huge_pte_at(mm, new_addr, dst_pte, pte, sz);
5079 }
5080
5081 if (src_ptl != dst_ptl)
5082 spin_unlock(src_ptl);
5083 spin_unlock(dst_ptl);
5084 }
5085
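/*
 * Relocate hugetlb page table entries for mremap(). Walks [old_addr,
 * old_addr + len) in huge-page steps, unsharing PMDs where necessary,
 * and returns the number of bytes successfully moved.
 */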
5086 int move_hugetlb_page_tables(struct vm_area_struct *vma,
5087 struct vm_area_struct *new_vma,
5088 unsigned long old_addr, unsigned long new_addr,
5089 unsigned long len)
5090 {
5091 struct hstate *h = hstate_vma(vma);
5092 struct address_space *mapping = vma->vm_file->f_mapping;
5093 unsigned long sz = huge_page_size(h);
5094 struct mm_struct *mm = vma->vm_mm;
5095 unsigned long old_end = old_addr + len;
5096 unsigned long last_addr_mask;
5097 pte_t *src_pte, *dst_pte;
5098 struct mmu_notifier_range range;
5099 bool shared_pmd = false;
5100
5101 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, old_addr,
5102 old_end);
5103 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
5104 /*
5105 * In case of shared PMDs, we should cover the maximum possible
5106 * range.
5107 */
5108 flush_cache_range(vma, range.start, range.end);
5109
5110 mmu_notifier_invalidate_range_start(&range);
5111 last_addr_mask = hugetlb_mask_last_page(h);
5112 /* Prevent race with file truncation */
5113 hugetlb_vma_lock_write(vma);
5114 i_mmap_lock_write(mapping);
5115 for (; old_addr < old_end; old_addr += sz, new_addr += sz) {
5116 src_pte = hugetlb_walk(vma, old_addr, sz);
5117 if (!src_pte) {
5118 old_addr |= last_addr_mask;
5119 new_addr |= last_addr_mask;
5120 continue;
5121 }
5122 if (huge_pte_none(huge_ptep_get(mm, old_addr, src_pte)))
5123 continue;
5124
5125 if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) {
5126 shared_pmd = true;
5127 old_addr |= last_addr_mask;
5128 new_addr |= last_addr_mask;
5129 continue;
5130 }
5131
5132 dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz);
5133 if (!dst_pte)
5134 break;
5135
5136 move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte, sz);
5137 }
5138
5139 if (shared_pmd)
5140 flush_hugetlb_tlb_range(vma, range.start, range.end);
5141 else
5142 flush_hugetlb_tlb_range(vma, old_end - len, old_end);
5143 mmu_notifier_invalidate_range_end(&range);
5144 i_mmap_unlock_write(mapping);
5145 hugetlb_vma_unlock_write(vma);
5146
5147 return len + old_addr - old_end;
5148 }
5149
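/*
 * Core hugetlb unmap. Clears PTEs in [start, end), handling shared PMD
 * unsharing, uffd-wp markers and reservation restore for anonymous
 * private folios. If @folio is non-NULL, only that folio is unmapped.
 */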
5150 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
5151 unsigned long start, unsigned long end,
5152 struct folio *folio, zap_flags_t zap_flags)
5153 {
5154 struct mm_struct *mm = vma->vm_mm;
5155 const bool folio_provided = !!folio;
5156 unsigned long address;
5157 pte_t *ptep;
5158 pte_t pte;
5159 spinlock_t *ptl;
5160 struct hstate *h = hstate_vma(vma);
5161 unsigned long sz = huge_page_size(h);
5162 bool adjust_reservation;
5163 unsigned long last_addr_mask;
5164 bool force_flush = false;
5165
5166 WARN_ON(!is_vm_hugetlb_page(vma));
5167 BUG_ON(start & ~huge_page_mask(h));
5168 BUG_ON(end & ~huge_page_mask(h));
5169
5170 /*
5171 * This is a hugetlb vma, all the pte entries should point
5172 * to huge page.
5173 */
5174 tlb_change_page_size(tlb, sz);
5175 tlb_start_vma(tlb, vma);
5176
5177 last_addr_mask = hugetlb_mask_last_page(h);
5178 address = start;
5179 for (; address < end; address += sz) {
5180 ptep = hugetlb_walk(vma, address, sz);
5181 if (!ptep) {
5182 address |= last_addr_mask;
5183 continue;
5184 }
5185
5186 ptl = huge_pte_lock(h, mm, ptep);
5187 if (huge_pmd_unshare(mm, vma, address, ptep)) {
5188 spin_unlock(ptl);
5189 tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
5190 force_flush = true;
5191 address |= last_addr_mask;
5192 continue;
5193 }
5194
5195 pte = huge_ptep_get(mm, address, ptep);
5196 if (huge_pte_none(pte)) {
5197 spin_unlock(ptl);
5198 continue;
5199 }
5200
5201 /*
5202 * A migrating or HWPoisoned hugepage is already
5203 * unmapped and its refcount is dropped, so just clear the pte here.
5204 */
5205 if (unlikely(!pte_present(pte))) {
5206 /*
5207 * If the pte was wr-protected by uffd-wp in any of the
5208 * swap forms, meanwhile the caller does not want to
5209 * drop the uffd-wp bit in this zap, then replace the
5210 * pte with a marker.
5211 */
5212 if (pte_swp_uffd_wp_any(pte) &&
5213 !(zap_flags & ZAP_FLAG_DROP_MARKER))
5214 set_huge_pte_at(mm, address, ptep,
5215 make_pte_marker(PTE_MARKER_UFFD_WP),
5216 sz);
5217 else
5218 huge_pte_clear(mm, address, ptep, sz);
5219 spin_unlock(ptl);
5220 continue;
5221 }
5222
5223 /*
5224 * If a folio is supplied, it is because a specific
5225 * folio is being unmapped, not a range. Ensure the folio we
5226 * are about to unmap is the actual folio of interest.
5227 */
5228 if (folio_provided) {
5229 if (folio != page_folio(pte_page(pte))) {
5230 spin_unlock(ptl);
5231 continue;
5232 }
5233 /*
5234 * Mark the VMA as having unmapped its page so that
5235 * future faults in this VMA will fail rather than
5236 * looking like data was lost
5237 */
5238 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
5239 } else {
5240 folio = page_folio(pte_page(pte));
5241 }
5242
5243 pte = huge_ptep_get_and_clear(mm, address, ptep, sz);
5244 tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
5245 if (huge_pte_dirty(pte))
5246 folio_mark_dirty(folio);
5247 /* Leave a uffd-wp pte marker if needed */
5248 if (huge_pte_uffd_wp(pte) &&
5249 !(zap_flags & ZAP_FLAG_DROP_MARKER))
5250 set_huge_pte_at(mm, address, ptep,
5251 make_pte_marker(PTE_MARKER_UFFD_WP),
5252 sz);
5253 hugetlb_count_sub(pages_per_huge_page(h), mm);
5254 hugetlb_remove_rmap(folio);
5255 spin_unlock(ptl);
5256
5257 /*
5258 * Restore the reservation for an anonymous page, otherwise the
5259 * backing page could be stolen by someone.
5260 * If we are freeing a surplus, do not set the restore
5261 * reservation bit.
5262 */
5263 adjust_reservation = false;
5264
5265 spin_lock_irq(&hugetlb_lock);
5266 if (!h->surplus_huge_pages && __vma_private_lock(vma) &&
5267 folio_test_anon(folio)) {
5268 folio_set_hugetlb_restore_reserve(folio);
5269 /* Reservation to be adjusted after the spin lock */
5270 adjust_reservation = true;
5271 }
5272 spin_unlock_irq(&hugetlb_lock);
5273
5274 /*
5275 * Adjust the reservation for the region that will have the
5276 * reserve restored. Keep in mind that vma_needs_reservation() changes
5277 * resv->adds_in_progress if it succeeds. If this is not done,
5278 * do_exit() will not see it, and will keep the reservation
5279 * forever.
5280 */
5281 if (adjust_reservation) {
5282 int rc = vma_needs_reservation(h, vma, address);
5283
5284 if (rc < 0)
5285 /* Presumably allocate_file_region_entries failed
5286 * to allocate a file_region struct. Clear
5287 * hugetlb_restore_reserve so that global reserve
5288 * count will not be incremented by free_huge_folio.
5289 * Act as if we consumed the reservation.
5290 */
5291 folio_clear_hugetlb_restore_reserve(folio);
5292 else if (rc)
5293 vma_add_reservation(h, vma, address);
5294 }
5295
5296 tlb_remove_page_size(tlb, folio_page(folio, 0),
5297 folio_size(folio));
5298 /*
5299 * If we were instructed to unmap a specific folio, we're done.
5300 */
5301 if (folio_provided)
5302 break;
5303 }
5304 tlb_end_vma(tlb, vma);
5305
5306 /*
5307 * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
5308 * could defer the flush until now, since by holding i_mmap_rwsem we
5309 * guaranteed that the last reference would not be dropped. But we must
5310 * do the flushing before we return, as otherwise i_mmap_rwsem will be
5311 * dropped and the last reference to the shared PMDs page might be
5312 * dropped as well.
5313 *
5314 * In theory we could defer the freeing of the PMD pages as well, but
5315 * huge_pmd_unshare() relies on the exact page_count for the PMD page to
5316 * detect sharing, so we cannot defer the release of the page either.
5317 * Instead, do flush now.
5318 */
5319 if (force_flush)
5320 tlb_flush_mmu_tlbonly(tlb);
5321 }
5322
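/*
 * __hugetlb_zap_begin()/__hugetlb_zap_end() bracket the unmap of a
 * hugetlb VMA, taking and releasing the hugetlb vma lock and
 * i_mmap_rwsem around the page table walk.
 */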
5323 void __hugetlb_zap_begin(struct vm_area_struct *vma,
5324 unsigned long *start, unsigned long *end)
5325 {
5326 if (!vma->vm_file) /* hugetlbfs_file_mmap error */
5327 return;
5328
5329 adjust_range_if_pmd_sharing_possible(vma, start, end);
5330 hugetlb_vma_lock_write(vma);
5331 if (vma->vm_file)
5332 i_mmap_lock_write(vma->vm_file->f_mapping);
5333 }
5334
5335 void __hugetlb_zap_end(struct vm_area_struct *vma,
5336 struct zap_details *details)
5337 {
5338 zap_flags_t zap_flags = details ? details->zap_flags : 0;
5339
5340 if (!vma->vm_file) /* hugetlbfs_file_mmap error */
5341 return;
5342
5343 if (zap_flags & ZAP_FLAG_UNMAP) { /* final unmap */
5344 /*
5345 * Unlock and free the vma lock before releasing i_mmap_rwsem.
5346 * When the vma_lock is freed, this makes the vma ineligible
5347 * for pmd sharing. And, i_mmap_rwsem is required to set up
5348 * pmd sharing. This is important as page tables for this
5349 * unmapped range will be asynchronously deleted. If the page
5350 * tables are shared, there will be issues when accessed by
5351 * someone else.
5352 */
5353 __hugetlb_vma_unlock_write_free(vma);
5354 } else {
5355 hugetlb_vma_unlock_write(vma);
5356 }
5357
5358 if (vma->vm_file)
5359 i_mmap_unlock_write(vma->vm_file->f_mapping);
5360 }
5361
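/*
 * Convenience wrapper that sets up the mmu notifier range and mmu_gather
 * before calling __unmap_hugepage_range().
 */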
5362 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
5363 unsigned long end, struct folio *folio,
5364 zap_flags_t zap_flags)
5365 {
5366 struct mmu_notifier_range range;
5367 struct mmu_gather tlb;
5368
5369 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
5370 start, end);
5371 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
5372 mmu_notifier_invalidate_range_start(&range);
5373 tlb_gather_mmu(&tlb, vma->vm_mm);
5374
5375 __unmap_hugepage_range(&tlb, vma, start, end,
5376 folio, zap_flags);
5377
5378 mmu_notifier_invalidate_range_end(&range);
5379 tlb_finish_mmu(&tlb);
5380 }
5381
5382 /*
5383 * This is called when the original mapper is failing to COW a MAP_PRIVATE
5384 * mapping it owns the reserve page for. The intention is to unmap the page
5385 * from other VMAs and let the children be SIGKILLed if they are faulting the
5386 * same region.
5387 */
5388 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
5389 struct folio *folio, unsigned long address)
5390 {
5391 struct hstate *h = hstate_vma(vma);
5392 struct vm_area_struct *iter_vma;
5393 struct address_space *mapping;
5394 pgoff_t pgoff;
5395
5396 /*
5397 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
5398 * from page cache lookup which is in HPAGE_SIZE units.
5399 */
5400 address = address & huge_page_mask(h);
5401 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
5402 vma->vm_pgoff;
5403 mapping = vma->vm_file->f_mapping;
5404
5405 /*
5406 * Take the mapping lock for the duration of the table walk. As
5407 * this mapping should be shared between all the VMAs,
5408 * __unmap_hugepage_range() is called as the lock is already held
5409 */
5410 i_mmap_lock_write(mapping);
5411 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
5412 /* Do not unmap the current VMA */
5413 if (iter_vma == vma)
5414 continue;
5415
5416 /*
5417 * Shared VMAs have their own reserves and do not affect
5418 * MAP_PRIVATE accounting but it is possible that a shared
5419 * VMA is using the same page so check and skip such VMAs.
5420 */
5421 if (iter_vma->vm_flags & VM_MAYSHARE)
5422 continue;
5423
5424 /*
5425 * Unmap the page from other VMAs without their own reserves.
5426 * They get marked to be SIGKILLed if they fault in these
5427 * areas. This is because a future no-page fault on this VMA
5428 * could insert a zeroed page instead of the data existing
5429 * from the time of fork. This would look like data corruption
5430 */
5431 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
5432 unmap_hugepage_range(iter_vma, address,
5433 address + huge_page_size(h),
5434 folio, 0);
5435 }
5436 i_mmap_unlock_write(mapping);
5437 }
5438
5439 /*
5440 * hugetlb_wp() should be called with page lock of the original hugepage held.
5441 * Called with hugetlb_fault_mutex_table held and pte_page locked so we
5442 * cannot race with other handlers or page migration.
5443 * Keep the pte_same checks anyway to make transition from the mutex easier.
5444 */
5445 static vm_fault_t hugetlb_wp(struct vm_fault *vmf)
5446 {
5447 struct vm_area_struct *vma = vmf->vma;
5448 struct mm_struct *mm = vma->vm_mm;
5449 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
5450 pte_t pte = huge_ptep_get(mm, vmf->address, vmf->pte);
5451 struct hstate *h = hstate_vma(vma);
5452 struct folio *old_folio;
5453 struct folio *new_folio;
5454 bool cow_from_owner = false;
5455 vm_fault_t ret = 0;
5456 struct mmu_notifier_range range;
5457
5458 /*
5459 * Never handle CoW for uffd-wp protected pages. It should be only
5460 * handled when the uffd-wp protection is removed.
5461 *
5462 * Note that only the CoW optimization path (in hugetlb_no_page())
5463 * can trigger this, because hugetlb_fault() will always resolve
5464 * uffd-wp bit first.
5465 */
5466 if (!unshare && huge_pte_uffd_wp(pte))
5467 return 0;
5468
5469 /* Let's take out MAP_SHARED mappings first. */
5470 if (vma->vm_flags & VM_MAYSHARE) {
5471 set_huge_ptep_writable(vma, vmf->address, vmf->pte);
5472 return 0;
5473 }
5474
5475 old_folio = page_folio(pte_page(pte));
5476
5477 delayacct_wpcopy_start();
5478
5479 retry_avoidcopy:
5480 /*
5481 * If no-one else is actually using this page, we're the exclusive
5482 * owner and can reuse this page.
5483 *
5484 * Note that we don't rely on the (safer) folio refcount here, because
5485 * copying the hugetlb folio when there are unexpected (temporary)
5486 * folio references could harm simple fork()+exit() users when
5487 * we run out of free hugetlb folios: we would have to kill processes
5488 * in scenarios that used to work. As a side effect, there can still
5489 * be leaks between processes, for example, with FOLL_GET users.
5490 */
5491 if (folio_mapcount(old_folio) == 1 && folio_test_anon(old_folio)) {
5492 if (!PageAnonExclusive(&old_folio->page)) {
5493 folio_move_anon_rmap(old_folio, vma);
5494 SetPageAnonExclusive(&old_folio->page);
5495 }
5496 if (likely(!unshare))
5497 set_huge_ptep_maybe_writable(vma, vmf->address,
5498 vmf->pte);
5499
5500 delayacct_wpcopy_end();
5501 return 0;
5502 }
5503 VM_BUG_ON_PAGE(folio_test_anon(old_folio) &&
5504 PageAnonExclusive(&old_folio->page), &old_folio->page);
5505
5506 /*
5507 * If the process that created a MAP_PRIVATE mapping is about to perform
5508 * a COW due to a shared page count, attempt to satisfy the allocation
5509 * without using the existing reserves.
5510 * In order to determine whether this is a COW on a MAP_PRIVATE mapping it
5511 * is enough to check whether the old_folio is anonymous. This means that
5512 * the reserve for this address was consumed. If reserves were used, a
5513 * partially faulted mapping at the time of fork() could consume its reserves
5514 * on COW instead of the full address range.
5515 */
5516 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
5517 folio_test_anon(old_folio))
5518 cow_from_owner = true;
5519
5520 folio_get(old_folio);
5521
5522 /*
5523 * Drop page table lock as buddy allocator may be called. It will
5524 * be acquired again before returning to the caller, as expected.
5525 */
5526 spin_unlock(vmf->ptl);
5527 new_folio = alloc_hugetlb_folio(vma, vmf->address, cow_from_owner);
5528
5529 if (IS_ERR(new_folio)) {
5530 /*
5531 * If a process owning a MAP_PRIVATE mapping fails to COW,
5532 * it is due to references held by a child and an insufficient
5533 * huge page pool. To guarantee the original mapper's
5534 * reliability, unmap the page from child processes. The child
5535 * may get SIGKILLed if it later faults.
5536 */
5537 if (cow_from_owner) {
5538 struct address_space *mapping = vma->vm_file->f_mapping;
5539 pgoff_t idx;
5540 u32 hash;
5541
5542 folio_put(old_folio);
5543 /*
5544 * Drop hugetlb_fault_mutex and vma_lock before
5545 * unmapping. unmapping needs to hold vma_lock
5546 * in write mode. Dropping vma_lock in read mode
5547 * here is OK as COW mappings do not interact with
5548 * PMD sharing.
5549 *
5550 * Reacquire both after unmap operation.
5551 */
5552 idx = vma_hugecache_offset(h, vma, vmf->address);
5553 hash = hugetlb_fault_mutex_hash(mapping, idx);
5554 hugetlb_vma_unlock_read(vma);
5555 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5556
5557 unmap_ref_private(mm, vma, old_folio, vmf->address);
5558
5559 mutex_lock(&hugetlb_fault_mutex_table[hash]);
5560 hugetlb_vma_lock_read(vma);
5561 spin_lock(vmf->ptl);
5562 vmf->pte = hugetlb_walk(vma, vmf->address,
5563 huge_page_size(h));
5564 if (likely(vmf->pte &&
5565 pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte)))
5566 goto retry_avoidcopy;
5567 /*
5568 * race occurs while re-acquiring page table
5569 * lock, and our job is done.
5570 */
5571 delayacct_wpcopy_end();
5572 return 0;
5573 }
5574
5575 ret = vmf_error(PTR_ERR(new_folio));
5576 goto out_release_old;
5577 }
5578
5579 /*
5580 * When the original hugepage is a shared one, it does not have
5581 * an anon_vma prepared.
5582 */
5583 ret = __vmf_anon_prepare(vmf);
5584 if (unlikely(ret))
5585 goto out_release_all;
5586
5587 if (copy_user_large_folio(new_folio, old_folio, vmf->real_address, vma)) {
5588 ret = VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(hstate_index(h));
5589 goto out_release_all;
5590 }
5591 __folio_mark_uptodate(new_folio);
5592
5593 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, vmf->address,
5594 vmf->address + huge_page_size(h));
5595 mmu_notifier_invalidate_range_start(&range);
5596
5597 /*
5598 * Retake the page table lock to check for racing updates
5599 * before the page tables are altered
5600 */
5601 spin_lock(vmf->ptl);
5602 vmf->pte = hugetlb_walk(vma, vmf->address, huge_page_size(h));
5603 if (likely(vmf->pte && pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte))) {
5604 pte_t newpte = make_huge_pte(vma, new_folio, !unshare);
5605
5606 /* Break COW or unshare */
5607 huge_ptep_clear_flush(vma, vmf->address, vmf->pte);
5608 hugetlb_remove_rmap(old_folio);
5609 hugetlb_add_new_anon_rmap(new_folio, vma, vmf->address);
5610 if (huge_pte_uffd_wp(pte))
5611 newpte = huge_pte_mkuffd_wp(newpte);
5612 set_huge_pte_at(mm, vmf->address, vmf->pte, newpte,
5613 huge_page_size(h));
5614 folio_set_hugetlb_migratable(new_folio);
5615 /* Make the old page be freed below */
5616 new_folio = old_folio;
5617 }
5618 spin_unlock(vmf->ptl);
5619 mmu_notifier_invalidate_range_end(&range);
5620 out_release_all:
5621 /*
5622 * No restore in case of successful pagetable update (Break COW or
5623 * unshare)
5624 */
5625 if (new_folio != old_folio)
5626 restore_reserve_on_error(h, vma, vmf->address, new_folio);
5627 folio_put(new_folio);
5628 out_release_old:
5629 folio_put(old_folio);
5630
5631 spin_lock(vmf->ptl); /* Caller expects lock to be held */
5632
5633 delayacct_wpcopy_end();
5634 return ret;
5635 }
5636
5637 /*
5638 * Return whether there is a pagecache page to back given address within VMA.
5639 */
5640 bool hugetlbfs_pagecache_present(struct hstate *h,
5641 struct vm_area_struct *vma, unsigned long address)
5642 {
5643 struct address_space *mapping = vma->vm_file->f_mapping;
5644 pgoff_t idx = linear_page_index(vma, address);
5645 struct folio *folio;
5646
5647 folio = filemap_get_folio(mapping, idx);
5648 if (IS_ERR(folio))
5649 return false;
5650 folio_put(folio);
5651 return true;
5652 }
5653
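/*
 * Insert a hugetlb folio into the hugetlbfs page cache at index @idx
 * (in huge-page units) and charge the inode's block count.
 */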
5654 int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
5655 pgoff_t idx)
5656 {
5657 struct inode *inode = mapping->host;
5658 struct hstate *h = hstate_inode(inode);
5659 int err;
5660
5661 idx <<= huge_page_order(h);
5662 __folio_set_locked(folio);
5663 err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL);
5664
5665 if (unlikely(err)) {
5666 __folio_clear_locked(folio);
5667 return err;
5668 }
5669 folio_clear_hugetlb_restore_reserve(folio);
5670
5671 /*
5672 * mark folio dirty so that it will not be removed from cache/file
5673 * by non-hugetlbfs specific code paths.
5674 */
5675 folio_mark_dirty(folio);
5676
5677 spin_lock(&inode->i_lock);
5678 inode->i_blocks += blocks_per_huge_page(h);
5679 spin_unlock(&inode->i_lock);
5680 return 0;
5681 }
5682
5683 static inline vm_fault_t hugetlb_handle_userfault(struct vm_fault *vmf,
5684 struct address_space *mapping,
5685 unsigned long reason)
5686 {
5687 u32 hash;
5688
5689 /*
5690 * vma_lock and hugetlb_fault_mutex must be dropped before handling
5691 * userfault. Also, mmap_lock could be dropped due to handling
5692 * userfault, so any vma operation should be careful from here.
5693 */
5694 hugetlb_vma_unlock_read(vmf->vma);
5695 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff);
5696 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5697 return handle_userfault(vmf, reason);
5698 }
5699
5700 /*
5701 * Recheck pte with pgtable lock. Returns true if pte didn't change, or
5702 * false if pte changed or is changing.
5703 */
5704 static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm, unsigned long addr,
5705 pte_t *ptep, pte_t old_pte)
5706 {
5707 spinlock_t *ptl;
5708 bool same;
5709
5710 ptl = huge_pte_lock(h, mm, ptep);
5711 same = pte_same(huge_ptep_get(mm, addr, ptep), old_pte);
5712 spin_unlock(ptl);
5713
5714 return same;
5715 }
5716
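/*
 * Handle a fault on a hugetlb address with no PTE present: look up or
 * allocate the folio, handle userfaultfd MISSING/MINOR events, install
 * the new PTE, and optionally perform the COW without a second fault.
 * Called with the fault mutex and vma lock held; both are dropped before
 * returning.
 */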
5717 static vm_fault_t hugetlb_no_page(struct address_space *mapping,
5718 struct vm_fault *vmf)
5719 {
5720 u32 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff);
5721 bool new_folio, new_anon_folio = false;
5722 struct vm_area_struct *vma = vmf->vma;
5723 struct mm_struct *mm = vma->vm_mm;
5724 struct hstate *h = hstate_vma(vma);
5725 vm_fault_t ret = VM_FAULT_SIGBUS;
5726 bool folio_locked = true;
5727 struct folio *folio;
5728 unsigned long size;
5729 pte_t new_pte;
5730
5731 /*
5732 * Currently, we are forced to kill the process in the event the
5733 * original mapper has unmapped pages from the child due to a failed
5734 * COW/unsharing. Warn that such a situation has occurred as it may not
5735 * be obvious.
5736 */
5737 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
5738 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
5739 current->pid);
5740 goto out;
5741 }
5742
5743 /*
5744 * Use page lock to guard against racing truncation
5745 * before we get page_table_lock.
5746 */
5747 new_folio = false;
5748 folio = filemap_lock_hugetlb_folio(h, mapping, vmf->pgoff);
5749 if (IS_ERR(folio)) {
5750 size = i_size_read(mapping->host) >> huge_page_shift(h);
5751 if (vmf->pgoff >= size)
5752 goto out;
5753 /* Check for page in userfault range */
5754 if (userfaultfd_missing(vma)) {
5755 /*
5756 * Since hugetlb_no_page() was examining the pte
5757 * without the pgtable lock, we need to re-test under
5758 * lock because the pte may not be stable and could
5759 * have changed from under us. Try to detect ptes
5760 * that have changed or are in the middle of changing,
5761 * and retry properly when needed.
5762 *
5763 * Note that userfaultfd is actually fine with
5764 * false positives (e.g. caused by pte changed),
5765 * but not wrong logical events (e.g. caused by
5766 * reading a pte during changing). The latter can
5767 * confuse the userspace, so the strictness is very
5768 * much preferred. E.g., MISSING event should
5769 * never happen on the page after UFFDIO_COPY has
5770 * correctly installed the page and returned.
5771 */
5772 if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) {
5773 ret = 0;
5774 goto out;
5775 }
5776
5777 return hugetlb_handle_userfault(vmf, mapping,
5778 VM_UFFD_MISSING);
5779 }
5780
5781 if (!(vma->vm_flags & VM_MAYSHARE)) {
5782 ret = __vmf_anon_prepare(vmf);
5783 if (unlikely(ret))
5784 goto out;
5785 }
5786
5787 folio = alloc_hugetlb_folio(vma, vmf->address, false);
5788 if (IS_ERR(folio)) {
5789 /*
5790 * Returning an error will result in the faulting task being
5791 * sent SIGBUS. The hugetlb fault mutex prevents two
5792 * tasks from racing to fault in the same page, which
5793 * could result in spurious "unable to allocate" errors.
5794 * Page migration does not take the fault mutex, but
5795 * does a clear then write of pte's under page table
5796 * lock. Page fault code could race with migration,
5797 * notice the clear pte and try to allocate a page
5798 * here. Before returning error, get ptl and make
5799 * sure there really is no pte entry.
5800 */
5801 if (hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte))
5802 ret = vmf_error(PTR_ERR(folio));
5803 else
5804 ret = 0;
5805 goto out;
5806 }
5807 folio_zero_user(folio, vmf->real_address);
5808 __folio_mark_uptodate(folio);
5809 new_folio = true;
5810
5811 if (vma->vm_flags & VM_MAYSHARE) {
5812 int err = hugetlb_add_to_page_cache(folio, mapping,
5813 vmf->pgoff);
5814 if (err) {
5815 /*
5816 * err can't be -EEXIST, which would imply someone
5817 * else consumed the reservation, since the hugetlb
5818 * fault mutex is held when adding a hugetlb page
5819 * to the page cache. So it's safe to call
5820 * restore_reserve_on_error() here.
5821 */
5822 restore_reserve_on_error(h, vma, vmf->address,
5823 folio);
5824 folio_put(folio);
5825 ret = VM_FAULT_SIGBUS;
5826 goto out;
5827 }
5828 } else {
5829 new_anon_folio = true;
5830 folio_lock(folio);
5831 }
5832 } else {
5833 /*
5834 * If a memory error occurs between mmap() and fault, some processes
5835 * don't have a hwpoisoned swap entry for the errored virtual address.
5836 * So we need to block the hugepage fault with a PG_hwpoison bit check.
5837 */
5838 if (unlikely(folio_test_hwpoison(folio))) {
5839 ret = VM_FAULT_HWPOISON_LARGE |
5840 VM_FAULT_SET_HINDEX(hstate_index(h));
5841 goto backout_unlocked;
5842 }
5843
5844 /* Check for page in userfault range. */
5845 if (userfaultfd_minor(vma)) {
5846 folio_unlock(folio);
5847 folio_put(folio);
5848 /* See comment in userfaultfd_missing() block above */
5849 if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) {
5850 ret = 0;
5851 goto out;
5852 }
5853 return hugetlb_handle_userfault(vmf, mapping,
5854 VM_UFFD_MINOR);
5855 }
5856 }
5857
5858 /*
5859 * If we are going to COW a private mapping later, we examine the
5860 * pending reservations for this page now. This will ensure that
5861 * any allocations necessary to record that reservation occur outside
5862 * the spinlock.
5863 */
5864 if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
5865 if (vma_needs_reservation(h, vma, vmf->address) < 0) {
5866 ret = VM_FAULT_OOM;
5867 goto backout_unlocked;
5868 }
5869 /* Just decrements count, does not deallocate */
5870 vma_end_reservation(h, vma, vmf->address);
5871 }
5872
5873 vmf->ptl = huge_pte_lock(h, mm, vmf->pte);
5874 ret = 0;
5875 /* If pte changed from under us, retry */
5876 if (!pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), vmf->orig_pte))
5877 goto backout;
5878
5879 if (new_anon_folio)
5880 hugetlb_add_new_anon_rmap(folio, vma, vmf->address);
5881 else
5882 hugetlb_add_file_rmap(folio);
5883 new_pte = make_huge_pte(vma, folio, vma->vm_flags & VM_SHARED);
5884 /*
5885 * If this pte was previously wr-protected, keep it wr-protected even
5886 * if populated.
5887 */
5888 if (unlikely(pte_is_uffd_wp_marker(vmf->orig_pte)))
5889 new_pte = huge_pte_mkuffd_wp(new_pte);
5890 set_huge_pte_at(mm, vmf->address, vmf->pte, new_pte, huge_page_size(h));
5891
5892 hugetlb_count_add(pages_per_huge_page(h), mm);
5893 if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
5894 /*
5895 * No need to keep file folios locked. See comment in
5896 * hugetlb_fault().
5897 */
5898 if (!new_anon_folio) {
5899 folio_locked = false;
5900 folio_unlock(folio);
5901 }
5902 /* Optimization, do the COW without a second fault */
5903 ret = hugetlb_wp(vmf);
5904 }
5905
5906 spin_unlock(vmf->ptl);
5907
5908 /*
5909 * Only set hugetlb_migratable in newly allocated pages. Existing pages
5910 * found in the pagecache may not have hugetlb_migratable if they have
5911 * been isolated for migration.
5912 */
5913 if (new_folio)
5914 folio_set_hugetlb_migratable(folio);
5915
5916 if (folio_locked)
5917 folio_unlock(folio);
5918 out:
5919 hugetlb_vma_unlock_read(vma);
5920
5921 /*
5922 * We must check to release the per-VMA lock. __vmf_anon_prepare() is
5923 * the only way ret can be set to VM_FAULT_RETRY.
5924 */
5925 if (unlikely(ret & VM_FAULT_RETRY))
5926 vma_end_read(vma);
5927
5928 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5929 return ret;
5930
5931 backout:
5932 spin_unlock(vmf->ptl);
5933 backout_unlocked:
5934 /* We only need to restore reservations for private mappings */
5935 if (new_anon_folio)
5936 restore_reserve_on_error(h, vma, vmf->address, folio);
5937
5938 folio_unlock(folio);
5939 folio_put(folio);
5940 goto out;
5941 }
5942
5943 #ifdef CONFIG_SMP
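/*
 * Hash a (mapping, index) pair onto one of the num_fault_mutexes fault
 * mutexes so that concurrent faults on the same page serialize on the
 * same mutex.
 */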
5944 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
5945 {
5946 unsigned long key[2];
5947 u32 hash;
5948
5949 key[0] = (unsigned long) mapping;
5950 key[1] = idx;
5951
5952 hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
5953
5954 return hash & (num_fault_mutexes - 1);
5955 }
5956 #else
5957 /*
5958 * For uniprocessor systems we always use a single mutex, so just
5959 * return 0 and avoid the hashing overhead.
5960 */
5961 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
5962 {
5963 return 0;
5964 }
5965 #endif
5966
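/*
 * Main hugetlb page fault handler. Serializes against other faults on
 * the same page via the fault mutex, resolves migration/hwpoison and
 * uffd marker entries, and dispatches to hugetlb_no_page() or
 * hugetlb_wp() as appropriate.
 */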
5967 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
5968 unsigned long address, unsigned int flags)
5969 {
5970 vm_fault_t ret;
5971 u32 hash;
5972 struct folio *folio = NULL;
5973 struct hstate *h = hstate_vma(vma);
5974 struct address_space *mapping;
5975 bool need_wait_lock = false;
5976 struct vm_fault vmf = {
5977 .vma = vma,
5978 .address = address & huge_page_mask(h),
5979 .real_address = address,
5980 .flags = flags,
5981 .pgoff = vma_hugecache_offset(h, vma,
5982 address & huge_page_mask(h)),
5983 /* TODO: Track hugetlb faults using vm_fault */
5984
5985 /*
5986 * Some fields may not be initialized, be careful as it may
5987 * be hard to debug if called functions make assumptions
5988 */
5989 };
5990
5991 /*
5992 * Serialize hugepage allocation and instantiation, so that we don't
5993 * get spurious allocation failures if two CPUs race to instantiate
5994 * the same page in the page cache.
5995 */
5996 mapping = vma->vm_file->f_mapping;
5997 hash = hugetlb_fault_mutex_hash(mapping, vmf.pgoff);
5998 mutex_lock(&hugetlb_fault_mutex_table[hash]);
5999
6000 /*
6001 * Acquire vma lock before calling huge_pte_alloc and hold
6002 * until finished with vmf.pte. This prevents huge_pmd_unshare from
6003 * being called elsewhere and making the vmf.pte no longer valid.
6004 */
6005 hugetlb_vma_lock_read(vma);
6006 vmf.pte = huge_pte_alloc(mm, vma, vmf.address, huge_page_size(h));
6007 if (!vmf.pte) {
6008 hugetlb_vma_unlock_read(vma);
6009 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6010 return VM_FAULT_OOM;
6011 }
6012
6013 vmf.orig_pte = huge_ptep_get(mm, vmf.address, vmf.pte);
6014 if (huge_pte_none(vmf.orig_pte))
6015 /*
6016 * hugetlb_no_page will drop the vma lock and hugetlb fault
6017 * mutex internally, which makes us return immediately.
6018 */
6019 return hugetlb_no_page(mapping, &vmf);
6020
6021 if (pte_is_marker(vmf.orig_pte)) {
6022 const pte_marker marker =
6023 softleaf_to_marker(softleaf_from_pte(vmf.orig_pte));
6024
6025 if (marker & PTE_MARKER_POISONED) {
6026 ret = VM_FAULT_HWPOISON_LARGE |
6027 VM_FAULT_SET_HINDEX(hstate_index(h));
6028 goto out_mutex;
6029 } else if (WARN_ON_ONCE(marker & PTE_MARKER_GUARD)) {
6030 /* This isn't supported in hugetlb. */
6031 ret = VM_FAULT_SIGSEGV;
6032 goto out_mutex;
6033 }
6034
6035 return hugetlb_no_page(mapping, &vmf);
6036 }
6037
6038 ret = 0;
6039
6040 /* Not present, either a migration or a hwpoisoned entry */
6041 if (!pte_present(vmf.orig_pte) && !huge_pte_none(vmf.orig_pte)) {
6042 const softleaf_t softleaf = softleaf_from_pte(vmf.orig_pte);
6043
6044 if (softleaf_is_migration(softleaf)) {
6045 /*
6046 * Release the hugetlb fault lock now, but retain
6047 * the vma lock, because it is needed to guard the
6048 * huge_pte_lockptr() later in
6049 * migration_entry_wait_huge(). The vma lock will
6050 * be released there.
6051 */
6052 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6053 migration_entry_wait_huge(vma, vmf.address, vmf.pte);
6054 return 0;
6055 }
6056 if (softleaf_is_hwpoison(softleaf)) {
6057 ret = VM_FAULT_HWPOISON_LARGE |
6058 VM_FAULT_SET_HINDEX(hstate_index(h));
6059 }
6060
6061 goto out_mutex;
6062 }
6063
6064 /*
6065 * If we are going to COW/unshare the mapping later, we examine the
6066 * pending reservations for this page now. This will ensure that any
6067 * allocations necessary to record that reservation occur outside the
6068 * spinlock.
6069 */
6070 if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
6071 !(vma->vm_flags & VM_MAYSHARE) && !huge_pte_write(vmf.orig_pte)) {
6072 if (vma_needs_reservation(h, vma, vmf.address) < 0) {
6073 ret = VM_FAULT_OOM;
6074 goto out_mutex;
6075 }
6076 /* Just decrements count, does not deallocate */
6077 vma_end_reservation(h, vma, vmf.address);
6078 }
6079
6080 vmf.ptl = huge_pte_lock(h, mm, vmf.pte);
6081
6082 /* Check for a racing update before calling hugetlb_wp() */
6083 if (unlikely(!pte_same(vmf.orig_pte, huge_ptep_get(mm, vmf.address, vmf.pte))))
6084 goto out_ptl;
6085
6086 /* Handle userfault-wp first, before trying to lock more pages */
6087 if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(mm, vmf.address, vmf.pte)) &&
6088 (flags & FAULT_FLAG_WRITE) && !huge_pte_write(vmf.orig_pte)) {
6089 if (!userfaultfd_wp_async(vma)) {
6090 spin_unlock(vmf.ptl);
6091 hugetlb_vma_unlock_read(vma);
6092 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6093 return handle_userfault(&vmf, VM_UFFD_WP);
6094 }
6095
6096 vmf.orig_pte = huge_pte_clear_uffd_wp(vmf.orig_pte);
6097 set_huge_pte_at(mm, vmf.address, vmf.pte, vmf.orig_pte,
6098 huge_page_size(hstate_vma(vma)));
6099 /* Fallthrough to CoW */
6100 }
6101
6102 if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
6103 if (!huge_pte_write(vmf.orig_pte)) {
6104 /*
6105 * Anonymous folios need to be locked since hugetlb_wp()
6106 * checks whether we can re-use the folio exclusively
6107 * for us in case we are the only user of it.
6108 */
6109 folio = page_folio(pte_page(vmf.orig_pte));
6110 if (folio_test_anon(folio) && !folio_trylock(folio)) {
6111 need_wait_lock = true;
6112 goto out_ptl;
6113 }
6114 folio_get(folio);
6115 ret = hugetlb_wp(&vmf);
6116 if (folio_test_anon(folio))
6117 folio_unlock(folio);
6118 folio_put(folio);
6119 goto out_ptl;
6120 } else if (likely(flags & FAULT_FLAG_WRITE)) {
6121 vmf.orig_pte = huge_pte_mkdirty(vmf.orig_pte);
6122 }
6123 }
6124 vmf.orig_pte = pte_mkyoung(vmf.orig_pte);
6125 if (huge_ptep_set_access_flags(vma, vmf.address, vmf.pte, vmf.orig_pte,
6126 flags & FAULT_FLAG_WRITE))
6127 update_mmu_cache(vma, vmf.address, vmf.pte);
6128 out_ptl:
6129 spin_unlock(vmf.ptl);
6130 out_mutex:
6131 hugetlb_vma_unlock_read(vma);
6132
6133 /*
6134 * We must check to release the per-VMA lock. __vmf_anon_prepare() in
6135 * hugetlb_wp() is the only way ret can be set to VM_FAULT_RETRY.
6136 */
6137 if (unlikely(ret & VM_FAULT_RETRY))
6138 vma_end_read(vma);
6139
6140 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6141 /*
6142 * hugetlb_wp drops all the locks, but the folio lock, before trying to
6143 * unmap the folio from other processes. During that window, if another
6144 * process mapping that folio faults in, it will take the mutex and then
6145 * it will wait on folio_lock, causing an ABBA deadlock.
6146 * Use trylock instead and bail out if we fail.
6147 *
6148 * Ideally, we should hold a refcount on the folio we wait for, but we do
6149 * not want to use the folio after it becomes unlocked; we only want to
6150 * wait for it to become unlocked, so hopefully the next fault succeeds on
6151 * the trylock.
6152 */
6153 if (need_wait_lock)
6154 folio_wait_locked(folio);
6155 return ret;
6156 }
6157
6158 #ifdef CONFIG_USERFAULTFD
6159 /*
6160 * Can probably be eliminated, but still used by hugetlb_mfill_atomic_pte().
6161 */
6162 static struct folio *alloc_hugetlb_folio_vma(struct hstate *h,
6163 struct vm_area_struct *vma, unsigned long address)
6164 {
6165 struct mempolicy *mpol;
6166 nodemask_t *nodemask;
6167 struct folio *folio;
6168 gfp_t gfp_mask;
6169 int node;
6170
6171 gfp_mask = htlb_alloc_mask(h);
6172 node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
6173 /*
6174 * This is used to allocate a temporary hugetlb to hold the copied
6175 * content, which will then be copied again to the final hugetlb
6176 * consuming a reservation. Set the alloc_fallback to false to indicate
6177 * that breaking the per-node hugetlb pool is not allowed in this case.
6178 */
6179 folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask, false);
6180 mpol_cond_put(mpol);
6181
6182 return folio;
6183 }
6184
6185 /*
6186 * Used by userfaultfd UFFDIO_* ioctls. Based on userfaultfd's mfill_atomic_pte
6187 * with modifications for hugetlb pages.
6188 */
6189 int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
6190 struct vm_area_struct *dst_vma,
6191 unsigned long dst_addr,
6192 unsigned long src_addr,
6193 uffd_flags_t flags,
6194 struct folio **foliop)
6195 {
6196 struct mm_struct *dst_mm = dst_vma->vm_mm;
6197 bool is_continue = uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE);
6198 bool wp_enabled = (flags & MFILL_ATOMIC_WP);
6199 struct hstate *h = hstate_vma(dst_vma);
6200 struct address_space *mapping = dst_vma->vm_file->f_mapping;
6201 pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
6202 unsigned long size = huge_page_size(h);
6203 int vm_shared = dst_vma->vm_flags & VM_SHARED;
6204 pte_t _dst_pte;
6205 spinlock_t *ptl;
6206 int ret = -ENOMEM;
6207 struct folio *folio;
6208 bool folio_in_pagecache = false;
6209 pte_t dst_ptep;
6210
6211 if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
6212 ptl = huge_pte_lock(h, dst_mm, dst_pte);
6213
6214 /* Don't overwrite any existing PTEs (even markers) */
6215 if (!huge_pte_none(huge_ptep_get(dst_mm, dst_addr, dst_pte))) {
6216 spin_unlock(ptl);
6217 return -EEXIST;
6218 }
6219
6220 _dst_pte = make_pte_marker(PTE_MARKER_POISONED);
6221 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, size);
6222
6223 /* No need to invalidate - it was non-present before */
6224 update_mmu_cache(dst_vma, dst_addr, dst_pte);
6225
6226 spin_unlock(ptl);
6227 return 0;
6228 }
6229
6230 if (is_continue) {
6231 ret = -EFAULT;
6232 folio = filemap_lock_hugetlb_folio(h, mapping, idx);
6233 if (IS_ERR(folio))
6234 goto out;
6235 folio_in_pagecache = true;
6236 } else if (!*foliop) {
6237 /* If a folio already exists, then it's UFFDIO_COPY for
6238 * a non-missing case. Return -EEXIST.
6239 */
6240 if (vm_shared &&
6241 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
6242 ret = -EEXIST;
6243 goto out;
6244 }
6245
6246 folio = alloc_hugetlb_folio(dst_vma, dst_addr, false);
6247 if (IS_ERR(folio)) {
6248 pte_t *actual_pte = hugetlb_walk(dst_vma, dst_addr, PMD_SIZE);
6249 if (actual_pte) {
6250 ret = -EEXIST;
6251 goto out;
6252 }
6253 ret = -ENOMEM;
6254 goto out;
6255 }
6256
6257 ret = copy_folio_from_user(folio, (const void __user *) src_addr,
6258 false);
6259
6260 /* fallback to copy_from_user outside mmap_lock */
6261 if (unlikely(ret)) {
6262 ret = -ENOENT;
6263 /* Free the allocated folio which may have
6264 * consumed a reservation.
6265 */
6266 restore_reserve_on_error(h, dst_vma, dst_addr, folio);
6267 folio_put(folio);
6268
6269 /* Allocate a temporary folio to hold the copied
6270 * contents.
6271 */
6272 folio = alloc_hugetlb_folio_vma(h, dst_vma, dst_addr);
6273 if (!folio) {
6274 ret = -ENOMEM;
6275 goto out;
6276 }
6277 *foliop = folio;
6278 /* Set the outparam foliop and return to the caller to
6279 * copy the contents outside the lock. Don't free the
6280 * folio.
6281 */
6282 goto out;
6283 }
6284 } else {
6285 if (vm_shared &&
6286 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
6287 folio_put(*foliop);
6288 ret = -EEXIST;
6289 *foliop = NULL;
6290 goto out;
6291 }
6292
6293 folio = alloc_hugetlb_folio(dst_vma, dst_addr, false);
6294 if (IS_ERR(folio)) {
6295 folio_put(*foliop);
6296 ret = -ENOMEM;
6297 *foliop = NULL;
6298 goto out;
6299 }
6300 ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
6301 folio_put(*foliop);
6302 *foliop = NULL;
6303 if (ret) {
6304 folio_put(folio);
6305 goto out;
6306 }
6307 }
6308
6309 /*
6310 * If we just allocated a new page, we need a memory barrier to ensure
6311 * that preceding stores to the page become visible before the
6312 * set_pte_at() write. The memory barrier inside __folio_mark_uptodate
6313 * is what we need.
6314 *
6315 * In the case where we have not allocated a new page (is_continue),
6316 * the page must already be uptodate. UFFDIO_CONTINUE already includes
6317 * an earlier smp_wmb() to ensure that prior stores will be visible
6318 * before the set_pte_at() write.
6319 */
6320 if (!is_continue)
6321 __folio_mark_uptodate(folio);
6322 else
6323 WARN_ON_ONCE(!folio_test_uptodate(folio));
6324
6325 /* Add shared, newly allocated pages to the page cache. */
6326 if (vm_shared && !is_continue) {
6327 ret = -EFAULT;
6328 if (idx >= (i_size_read(mapping->host) >> huge_page_shift(h)))
6329 goto out_release_nounlock;
6330
6331 /*
6332 * Serialization between remove_inode_hugepages() and
6333 * hugetlb_add_to_page_cache() below happens through the
6334 * hugetlb_fault_mutex_table, which here must be held by
6335 * the caller.
6336 */
6337 ret = hugetlb_add_to_page_cache(folio, mapping, idx);
6338 if (ret)
6339 goto out_release_nounlock;
6340 folio_in_pagecache = true;
6341 }
6342
6343 ptl = huge_pte_lock(h, dst_mm, dst_pte);
6344
6345 ret = -EIO;
6346 if (folio_test_hwpoison(folio))
6347 goto out_release_unlock;
6348
6349 ret = -EEXIST;
6350
6351 dst_ptep = huge_ptep_get(dst_mm, dst_addr, dst_pte);
6352 /*
6353 * See comment about UFFD marker overwriting in
6354 * mfill_atomic_install_pte().
6355 */
6356 if (!huge_pte_none(dst_ptep) && !pte_is_uffd_marker(dst_ptep))
6357 goto out_release_unlock;
6358
6359 if (folio_in_pagecache)
6360 hugetlb_add_file_rmap(folio);
6361 else
6362 hugetlb_add_new_anon_rmap(folio, dst_vma, dst_addr);
6363
6364 /*
6365 * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY
6366 * with wp flag set, don't set pte write bit.
6367 */
6368 _dst_pte = make_huge_pte(dst_vma, folio,
6369 !wp_enabled && !(is_continue && !vm_shared));
6370 /*
6371 * Always mark the UFFDIO_COPY page dirty; note that this may not be
6372 * extremely important for hugetlbfs for now since swapping is not
6373 * supported, but we should still be clear that this page cannot be
6374 * thrown away at will, even if the write bit is not set.
6375 */
6376 _dst_pte = huge_pte_mkdirty(_dst_pte);
6377 _dst_pte = pte_mkyoung(_dst_pte);
6378
6379 if (wp_enabled)
6380 _dst_pte = huge_pte_mkuffd_wp(_dst_pte);
6381
6382 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, size);
6383
6384 hugetlb_count_add(pages_per_huge_page(h), dst_mm);
6385
6386 /* No need to invalidate - it was non-present before */
6387 update_mmu_cache(dst_vma, dst_addr, dst_pte);
6388
6389 spin_unlock(ptl);
6390 if (!is_continue)
6391 folio_set_hugetlb_migratable(folio);
6392 if (vm_shared || is_continue)
6393 folio_unlock(folio);
6394 ret = 0;
6395 out:
6396 return ret;
6397 out_release_unlock:
6398 spin_unlock(ptl);
6399 if (vm_shared || is_continue)
6400 folio_unlock(folio);
6401 out_release_nounlock:
6402 if (!folio_in_pagecache)
6403 restore_reserve_on_error(h, dst_vma, dst_addr, folio);
6404 folio_put(folio);
6405 goto out;
6406 }
6407 #endif /* CONFIG_USERFAULTFD */
6408
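/*
 * Change protection (and/or uffd-wp state) on a hugetlb range. Returns
 * the number of base pages whose protection was changed, or a negative
 * error code.
 */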
6409 long hugetlb_change_protection(struct vm_area_struct *vma,
6410 unsigned long address, unsigned long end,
6411 pgprot_t newprot, unsigned long cp_flags)
6412 {
6413 struct mm_struct *mm = vma->vm_mm;
6414 unsigned long start = address;
6415 pte_t *ptep;
6416 pte_t pte;
6417 struct hstate *h = hstate_vma(vma);
6418 long pages = 0, psize = huge_page_size(h);
6419 bool shared_pmd = false;
6420 struct mmu_notifier_range range;
6421 unsigned long last_addr_mask;
6422 bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
6423 bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
6424
6425 /*
6426 * In the case of shared PMDs, the area to flush could be beyond
6427 * start/end. Set range.start/range.end to cover the maximum possible
6428 * range if PMD sharing is possible.
6429 */
6430 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
6431 0, mm, start, end);
6432 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
6433
6434 BUG_ON(address >= end);
6435 flush_cache_range(vma, range.start, range.end);
6436
6437 mmu_notifier_invalidate_range_start(&range);
6438 hugetlb_vma_lock_write(vma);
6439 i_mmap_lock_write(vma->vm_file->f_mapping);
6440 last_addr_mask = hugetlb_mask_last_page(h);
6441 for (; address < end; address += psize) {
6442 softleaf_t entry;
6443 spinlock_t *ptl;
6444
6445 ptep = hugetlb_walk(vma, address, psize);
6446 if (!ptep) {
6447 if (!uffd_wp) {
6448 address |= last_addr_mask;
6449 continue;
6450 }
6451 /*
6452 * Userfaultfd wr-protect requires pgtable
6453 * pre-allocations to install pte markers.
6454 */
6455 ptep = huge_pte_alloc(mm, vma, address, psize);
6456 if (!ptep) {
6457 pages = -ENOMEM;
6458 break;
6459 }
6460 }
6461 ptl = huge_pte_lock(h, mm, ptep);
6462 if (huge_pmd_unshare(mm, vma, address, ptep)) {
6463 /*
6464 * When uffd-wp is enabled on the vma, unshare
6465 * shouldn't happen at all. Warn about it if it
6466 * happens for some reason.
6467 */
6468 WARN_ON_ONCE(uffd_wp || uffd_wp_resolve);
6469 pages++;
6470 spin_unlock(ptl);
6471 shared_pmd = true;
6472 address |= last_addr_mask;
6473 continue;
6474 }
6475 pte = huge_ptep_get(mm, address, ptep);
6476 if (huge_pte_none(pte)) {
6477 if (unlikely(uffd_wp))
6478 /* Safe to modify directly (none->non-present). */
6479 set_huge_pte_at(mm, address, ptep,
6480 make_pte_marker(PTE_MARKER_UFFD_WP),
6481 psize);
6482 goto next;
6483 }
6484
6485 entry = softleaf_from_pte(pte);
6486 if (unlikely(softleaf_is_hwpoison(entry))) {
6487 /* Nothing to do. */
6488 } else if (unlikely(softleaf_is_migration(entry))) {
6489 struct folio *folio = softleaf_to_folio(entry);
6490 pte_t newpte = pte;
6491
6492 if (softleaf_is_migration_write(entry)) {
6493 if (folio_test_anon(folio))
6494 entry = make_readable_exclusive_migration_entry(
6495 swp_offset(entry));
6496 else
6497 entry = make_readable_migration_entry(
6498 swp_offset(entry));
6499 newpte = swp_entry_to_pte(entry);
6500 pages++;
6501 }
6502
6503 if (uffd_wp)
6504 newpte = pte_swp_mkuffd_wp(newpte);
6505 else if (uffd_wp_resolve)
6506 newpte = pte_swp_clear_uffd_wp(newpte);
6507 if (!pte_same(pte, newpte))
6508 set_huge_pte_at(mm, address, ptep, newpte, psize);
6509 } else if (unlikely(pte_is_marker(pte))) {
6510 /*
6511 * Do nothing on a poison marker; page is
6512 * corrupted, permissions do not apply. Here
6513 * pte_marker_uffd_wp()==true implies !poison
6514 * because they're mutual exclusive.
6515 */
6516 if (pte_is_uffd_wp_marker(pte) && uffd_wp_resolve)
6517 /* Safe to modify directly (non-present->none). */
6518 huge_pte_clear(mm, address, ptep, psize);
6519 } else {
6520 pte_t old_pte;
6521 unsigned int shift = huge_page_shift(hstate_vma(vma));
6522
6523 old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
6524 pte = huge_pte_modify(old_pte, newprot);
6525 pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
6526 if (uffd_wp)
6527 pte = huge_pte_mkuffd_wp(pte);
6528 else if (uffd_wp_resolve)
6529 pte = huge_pte_clear_uffd_wp(pte);
6530 huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
6531 pages++;
6532 }
6533
6534 next:
6535 spin_unlock(ptl);
6536 cond_resched();
6537 }
6538 /*
6539 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
6540 * may have cleared our pud entry and done put_page on the page table:
6541 * once we release i_mmap_rwsem, another task can do the final put_page
6542 * and that page table be reused and filled with junk. If we actually
6543 * did unshare a page of pmds, flush the range corresponding to the pud.
6544 */
6545 if (shared_pmd)
6546 flush_hugetlb_tlb_range(vma, range.start, range.end);
6547 else
6548 flush_hugetlb_tlb_range(vma, start, end);
6549 /*
6550 * No need to call mmu_notifier_arch_invalidate_secondary_tlbs() we are
6551 * downgrading page table protection not changing it to point to a new
6552 * page.
6553 *
6554 * See Documentation/mm/mmu_notifier.rst
6555 */
6556 i_mmap_unlock_write(vma->vm_file->f_mapping);
6557 hugetlb_vma_unlock_write(vma);
6558 mmu_notifier_invalidate_range_end(&range);
6559
6560 return pages > 0 ? (pages << h->order) : pages;
6561 }
6562
6563 /*
6564 * Update the reservation map for the range [from, to].
6565 *
6566 * Returns the number of entries that would be added to the reservation map
6567 * associated with the range [from, to]. This number is greater or equal to
6568 * zero. -EINVAL or -ENOMEM is returned in case of any errors.
6569 */
6570
6571 long hugetlb_reserve_pages(struct inode *inode,
6572 long from, long to,
6573 struct vm_area_desc *desc,
6574 vm_flags_t vm_flags)
6575 {
6576 long chg = -1, add = -1, spool_resv, gbl_resv;
6577 struct hstate *h = hstate_inode(inode);
6578 struct hugepage_subpool *spool = subpool_inode(inode);
6579 struct resv_map *resv_map;
6580 struct hugetlb_cgroup *h_cg = NULL;
6581 long gbl_reserve, regions_needed = 0;
6582
6583 /* This should never happen */
6584 if (from > to) {
6585 VM_WARN(1, "%s called with a negative range\n", __func__);
6586 return -EINVAL;
6587 }
6588
6589 /*
6590 * Only apply hugepage reservation if asked. At fault time, an
6591 * attempt will be made for VM_NORESERVE to allocate a page
6592 * without using reserves
6593 */
6594 if (vm_flags & VM_NORESERVE)
6595 return 0;
6596
6597 /*
6598 * Shared mappings base their reservation on the number of pages that
6599 * are already allocated on behalf of the file. Private mappings need
6600 * to reserve the full area even if read-only as mprotect() may be
6601 * called to make the mapping read-write. Assume !desc is a shm mapping
6602 */
6603 if (!desc || desc->vm_flags & VM_MAYSHARE) {
6604 /*
6605 * resv_map can not be NULL as hugetlb_reserve_pages is only
6606 * called for inodes for which resv_maps were created (see
6607 * hugetlbfs_get_inode).
6608 */
6609 resv_map = inode_resv_map(inode);
6610
6611 chg = region_chg(resv_map, from, to, &regions_needed);
6612 } else {
6613 /* Private mapping. */
6614 resv_map = resv_map_alloc();
6615 if (!resv_map)
6616 goto out_err;
6617
6618 chg = to - from;
6619
6620 set_vma_desc_resv_map(desc, resv_map);
6621 set_vma_desc_resv_flags(desc, HPAGE_RESV_OWNER);
6622 }
6623
6624 if (chg < 0)
6625 goto out_err;
6626
6627 if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
6628 chg * pages_per_huge_page(h), &h_cg) < 0)
6629 goto out_err;
6630
6631 if (desc && !(desc->vm_flags & VM_MAYSHARE) && h_cg) {
6632 /* For private mappings, the hugetlb_cgroup uncharge info hangs
6633 * off the resv_map.
6634 */
6635 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h);
6636 }
6637
6638 /*
6639 * There must be enough pages in the subpool for the mapping. If
6640 * the subpool has a minimum size, there may be some global
6641 * reservations already in place (gbl_reserve).
6642 */
6643 gbl_reserve = hugepage_subpool_get_pages(spool, chg);
6644 if (gbl_reserve < 0)
6645 goto out_uncharge_cgroup;
6646
6647 /*
6648 * Check enough hugepages are available for the reservation.
6649 * Hand the pages back to the subpool if there are not
6650 */
6651 if (hugetlb_acct_memory(h, gbl_reserve) < 0)
6652 goto out_put_pages;
6653
6654 /*
6655 * Account for the reservations made. Shared mappings record regions
6656 * that have reservations as they are shared by multiple VMAs.
6657 * When the last VMA disappears, the region map says how much
6658 * the reservation was and the page cache tells how much of
6659 * the reservation was consumed. Private mappings are per-VMA and
6660 * only the consumed reservations are tracked. When the VMA
6661 * disappears, the original reservation is the VMA size and the
6662 * consumed reservations are stored in the map. Hence, nothing
6663 * else has to be done for private mappings here
6664 */
6665 if (!desc || desc->vm_flags & VM_MAYSHARE) {
6666 add = region_add(resv_map, from, to, regions_needed, h, h_cg);
6667
6668 if (unlikely(add < 0)) {
6669 hugetlb_acct_memory(h, -gbl_reserve);
6670 goto out_put_pages;
6671 } else if (unlikely(chg > add)) {
6672 /*
6673 * pages in this range were added to the reserve
6674 * map between region_chg and region_add. This
6675 * indicates a race with alloc_hugetlb_folio. Adjust
6676 * the subpool and reserve counts modified above
6677 * based on the difference.
6678 */
6679 long rsv_adjust;
6680
6681 /*
6682 * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the
6683 * reference to h_cg->css. See comment below for detail.
6684 */
6685 hugetlb_cgroup_uncharge_cgroup_rsvd(
6686 hstate_index(h),
6687 (chg - add) * pages_per_huge_page(h), h_cg);
6688
6689 rsv_adjust = hugepage_subpool_put_pages(spool,
6690 chg - add);
6691 hugetlb_acct_memory(h, -rsv_adjust);
6692 } else if (h_cg) {
6693 /*
6694 * The file_regions will hold their own reference to
6695 * h_cg->css. So we should release the reference held
6696 * via hugetlb_cgroup_charge_cgroup_rsvd() when we are
6697 * done.
6698 */
6699 hugetlb_cgroup_put_rsvd_cgroup(h_cg);
6700 }
6701 }
6702 return chg;
6703
6704 out_put_pages:
6705 spool_resv = chg - gbl_reserve;
6706 if (spool_resv) {
6707 /* put sub pool's reservation back, chg - gbl_reserve */
6708 gbl_resv = hugepage_subpool_put_pages(spool, spool_resv);
6709 /*
6710 * The subpool's reserved pages cannot be put back due to a race;
6711 * return them to the hstate.
6712 */
6713 hugetlb_acct_memory(h, -gbl_resv);
6714 }
6715 out_uncharge_cgroup:
6716 hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
6717 chg * pages_per_huge_page(h), h_cg);
6718 out_err:
6719 if (!desc || desc->vm_flags & VM_MAYSHARE)
6720 /* Only call region_abort if the region_chg succeeded but the
6721 * region_add failed or didn't run.
6722 */
6723 if (chg >= 0 && add < 0)
6724 region_abort(resv_map, from, to, regions_needed);
6725 if (desc && is_vma_desc_resv_set(desc, HPAGE_RESV_OWNER)) {
6726 kref_put(&resv_map->refs, resv_map_release);
6727 set_vma_desc_resv_map(desc, NULL);
6728 }
6729 return chg < 0 ? chg : add < 0 ? add : -EINVAL;
6730 }
6731
6732 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
6733 long freed)
6734 {
6735 struct hstate *h = hstate_inode(inode);
6736 struct resv_map *resv_map = inode_resv_map(inode);
6737 long chg = 0;
6738 struct hugepage_subpool *spool = subpool_inode(inode);
6739 long gbl_reserve;
6740
6741 /*
6742 * Since this routine can be called in the evict inode path for all
6743 * hugetlbfs inodes, resv_map could be NULL.
6744 */
6745 if (resv_map) {
6746 chg = region_del(resv_map, start, end);
6747 /*
6748 * region_del() can fail in the rare case where a region
6749 * must be split and another region descriptor cannot be
6750 * allocated. If end == LONG_MAX, it will not fail.
6751 */
6752 if (chg < 0)
6753 return chg;
6754 }
6755
6756 spin_lock(&inode->i_lock);
6757 inode->i_blocks -= (blocks_per_huge_page(h) * freed);
6758 spin_unlock(&inode->i_lock);
6759
6760 /*
6761 * If the subpool has a minimum size, the number of global
6762 * reservations to be released may be adjusted.
6763 *
6764 * Note that !resv_map implies freed == 0. So (chg - freed)
6765 * won't go negative.
6766 */
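/*
 * Sketch of the accounting (illustrative): if chg == freed, the regions
 * deleted exactly match the pages freed and the difference handed to the
 * subpool is 0; any positive difference represents reservations that were
 * never consumed and can be released now.
 */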
6767 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
6768 hugetlb_acct_memory(h, -gbl_reserve);
6769
6770 return 0;
6771 }
6772
6773 #ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
6774 static unsigned long page_table_shareable(struct vm_area_struct *svma,
6775 struct vm_area_struct *vma,
6776 unsigned long addr, pgoff_t idx)
6777 {
6778 unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
6779 svma->vm_start;
6780 unsigned long sbase = saddr & PUD_MASK;
6781 unsigned long s_end = sbase + PUD_SIZE;
6782
6783 /* Allow segments to share if only one is marked locked */
6784 vm_flags_t vm_flags = vma->vm_flags & ~VM_LOCKED_MASK;
6785 vm_flags_t svm_flags = svma->vm_flags & ~VM_LOCKED_MASK;
6786
6787 /*
6788 * Match the virtual addresses, permissions and the alignment of the
6789 * page table page.
6790 *
6791 * Also, vma_lock (vm_private_data) is required for sharing.
6792 */
6793 if (pmd_index(addr) != pmd_index(saddr) ||
6794 vm_flags != svm_flags ||
6795 !range_in_vma(svma, sbase, s_end) ||
6796 !svma->vm_private_data)
6797 return 0;
6798
6799 return saddr;
6800 }
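/*
 * Illustrative scenario (an assumption for the example, not taken from the
 * code above): two MAP_SHARED hugetlbfs mappings of the same file, placed at
 * PUD_SIZE-aligned addresses with identical protection and each covering the
 * same PUD_SIZE-aligned file range, satisfy the checks above, so
 * page_table_shareable() returns the matching address in svma and the PMD
 * page table page can be shared.
 */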
6801
6802 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
6803 {
6804 unsigned long start = addr & PUD_MASK;
6805 unsigned long end = start + PUD_SIZE;
6806
6807 #ifdef CONFIG_USERFAULTFD
6808 if (uffd_disable_huge_pmd_share(vma))
6809 return false;
6810 #endif
6811 /*
6812 * check on proper vm_flags and page table alignment
6813 */
6814 if (!(vma->vm_flags & VM_MAYSHARE))
6815 return false;
6816 if (!vma->vm_private_data) /* vma lock required for sharing */
6817 return false;
6818 if (!range_in_vma(vma, start, end))
6819 return false;
6820 return true;
6821 }
6822
6823 /*
6824 * Determine if start,end range within vma could be mapped by shared pmd.
6825 * If yes, adjust start and end to cover range associated with possible
6826 * shared pmd mappings.
6827 */
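/*
 * Worked example (illustrative, assuming a 1 GiB PUD_SIZE): for a VMA
 * spanning several PUD-aligned gigabytes, a request covering only part of
 * one of those 1 GiB regions has its start rounded down and its end rounded
 * up to PUD_SIZE boundaries, matching the granularity at which PMD page
 * tables can be shared.
 */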
6828 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
6829 unsigned long *start, unsigned long *end)
6830 {
6831 unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
6832 v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
6833
6834 /*
6835 * vma needs to span at least one aligned PUD size, and the range
6836 * must be at least partially within it.
6837 */
6838 if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
6839 (*end <= v_start) || (*start >= v_end))
6840 return;
6841
6842 /* Extend the range to be PUD aligned for a worst case scenario */
6843 if (*start > v_start)
6844 *start = ALIGN_DOWN(*start, PUD_SIZE);
6845
6846 if (*end < v_end)
6847 *end = ALIGN(*end, PUD_SIZE);
6848 }
6849
6850 /*
6851 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
6852 * and returns the corresponding pte. While this is not necessary for the
6853 * !shared pmd case because we can allocate the pmd later as well, it makes the
6854 * code much cleaner. pmd allocation is essential for the shared case because
6855 * pud has to be populated inside the same i_mmap_rwsem section - otherwise
6856 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
6857 * bad pmd for sharing.
6858 */
6859 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
6860 unsigned long addr, pud_t *pud)
6861 {
6862 struct address_space *mapping = vma->vm_file->f_mapping;
6863 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
6864 vma->vm_pgoff;
6865 struct vm_area_struct *svma;
6866 unsigned long saddr;
6867 pte_t *spte = NULL;
6868 pte_t *pte;
6869
6870 i_mmap_lock_read(mapping);
6871 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
6872 if (svma == vma)
6873 continue;
6874
6875 saddr = page_table_shareable(svma, vma, addr, idx);
6876 if (saddr) {
6877 spte = hugetlb_walk(svma, saddr,
6878 vma_mmu_pagesize(svma));
6879 if (spte) {
6880 ptdesc_pmd_pts_inc(virt_to_ptdesc(spte));
6881 break;
6882 }
6883 }
6884 }
6885
6886 if (!spte)
6887 goto out;
6888
6889 spin_lock(&mm->page_table_lock);
6890 if (pud_none(*pud)) {
6891 pud_populate(mm, pud,
6892 (pmd_t *)((unsigned long)spte & PAGE_MASK));
6893 mm_inc_nr_pmds(mm);
6894 } else {
6895 ptdesc_pmd_pts_dec(virt_to_ptdesc(spte));
6896 }
6897 spin_unlock(&mm->page_table_lock);
6898 out:
6899 pte = (pte_t *)pmd_alloc(mm, pud, addr);
6900 i_mmap_unlock_read(mapping);
6901 return pte;
6902 }
6903
6904 /*
6905 * unmap huge page backed by shared pte.
6906 *
6907 * Called with page table lock held.
6908 *
6909 * returns: 1 successfully unmapped a shared pte page
6910 * 0 the underlying pte page is not shared, or it is the last user
6911 */
6912 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
6913 unsigned long addr, pte_t *ptep)
6914 {
6915 unsigned long sz = huge_page_size(hstate_vma(vma));
6916 pgd_t *pgd = pgd_offset(mm, addr);
6917 p4d_t *p4d = p4d_offset(pgd, addr);
6918 pud_t *pud = pud_offset(p4d, addr);
6919
6920 if (sz != PMD_SIZE)
6921 return 0;
6922 if (!ptdesc_pmd_is_shared(virt_to_ptdesc(ptep)))
6923 return 0;
6924 i_mmap_assert_write_locked(vma->vm_file->f_mapping);
6925 hugetlb_vma_assert_locked(vma);
6926 pud_clear(pud);
6927 /*
6928 * Once our caller drops the rmap lock, some other process might be
6929 * using this page table as a normal, non-hugetlb page table.
6930 * Wait for pending gup_fast() in other threads to finish before letting
6931 * that happen.
6932 */
6933 tlb_remove_table_sync_one();
6934 ptdesc_pmd_pts_dec(virt_to_ptdesc(ptep));
6935 mm_dec_nr_pmds(mm);
6936 return 1;
6937 }
6938
6939 #else /* !CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */
6940
6941 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
6942 unsigned long addr, pud_t *pud)
6943 {
6944 return NULL;
6945 }
6946
6947 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
6948 unsigned long addr, pte_t *ptep)
6949 {
6950 return 0;
6951 }
6952
6953 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
6954 unsigned long *start, unsigned long *end)
6955 {
6956 }
6957
6958 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
6959 {
6960 return false;
6961 }
6962 #endif /* CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */
6963
6964 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
6965 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
6966 unsigned long addr, unsigned long sz)
6967 {
6968 pgd_t *pgd;
6969 p4d_t *p4d;
6970 pud_t *pud;
6971 pte_t *pte = NULL;
6972
6973 pgd = pgd_offset(mm, addr);
6974 p4d = p4d_alloc(mm, pgd, addr);
6975 if (!p4d)
6976 return NULL;
6977 pud = pud_alloc(mm, p4d, addr);
6978 if (pud) {
6979 if (sz == PUD_SIZE) {
6980 pte = (pte_t *)pud;
6981 } else {
6982 BUG_ON(sz != PMD_SIZE);
6983 if (want_pmd_share(vma, addr) && pud_none(*pud))
6984 pte = huge_pmd_share(mm, vma, addr, pud);
6985 else
6986 pte = (pte_t *)pmd_alloc(mm, pud, addr);
6987 }
6988 }
6989
6990 if (pte) {
6991 pte_t pteval = ptep_get_lockless(pte);
6992
6993 BUG_ON(pte_present(pteval) && !pte_huge(pteval));
6994 }
6995
6996 return pte;
6997 }
6998
6999 /*
7000 * huge_pte_offset() - Walk the page table to resolve the hugepage
7001 * entry at address @addr
7002 *
7003 * Return: Pointer to page table entry (PUD or PMD) for
7004 * address @addr, or NULL if a !p*d_present() entry is encountered and the
7005 * size @sz doesn't match the hugepage size at this level of the page
7006 * table.
7007 */
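/*
 * Illustrative use (a sketch; the hstate h and the address are assumptions
 * made for the example):
 *
 *	pte_t *ptep = huge_pte_offset(mm, addr & huge_page_mask(h),
 *				      huge_page_size(h));
 *
 * A NULL result means no page table entry of the requested size could be
 * resolved at that address.
 */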
7008 pte_t *huge_pte_offset(struct mm_struct *mm,
7009 unsigned long addr, unsigned long sz)
7010 {
7011 pgd_t *pgd;
7012 p4d_t *p4d;
7013 pud_t *pud;
7014 pmd_t *pmd;
7015
7016 pgd = pgd_offset(mm, addr);
7017 if (!pgd_present(*pgd))
7018 return NULL;
7019 p4d = p4d_offset(pgd, addr);
7020 if (!p4d_present(*p4d))
7021 return NULL;
7022
7023 pud = pud_offset(p4d, addr);
7024 if (sz == PUD_SIZE)
7025 /* must be pud huge, non-present or none */
7026 return (pte_t *)pud;
7027 if (!pud_present(*pud))
7028 return NULL;
7029 /* must have a valid entry and size to go further */
7030
7031 pmd = pmd_offset(pud, addr);
7032 /* must be pmd huge, non-present or none */
7033 return (pte_t *)pmd;
7034 }
7035
7036 /*
7037 * Return a mask that can be used to update an address to the last huge
7038 * page in a page table page mapping size. Used to skip non-present
7039 * page table entries when linearly scanning address ranges. Architectures
7040 * with unique huge page to page table relationships can define their own
7041 * version of this routine.
7042 */
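/*
 * For example, with PMD-sized huge pages the value below is
 * PUD_SIZE - PMD_SIZE: a scanner that hits a non-present entry can do
 * "addr |= mask" so that its next "addr += huge_page_size(h)" step lands in
 * the following page table page (a sketch of how callers can use the value).
 */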
7043 unsigned long hugetlb_mask_last_page(struct hstate *h)
7044 {
7045 unsigned long hp_size = huge_page_size(h);
7046
7047 if (hp_size == PUD_SIZE)
7048 return P4D_SIZE - PUD_SIZE;
7049 else if (hp_size == PMD_SIZE)
7050 return PUD_SIZE - PMD_SIZE;
7051 else
7052 return 0UL;
7053 }
7054
7055 #else
7056
7057 /* See description above. Architectures can provide their own version. */
7058 __weak unsigned long hugetlb_mask_last_page(struct hstate *h)
7059 {
7060 #ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
7061 if (huge_page_size(h) == PMD_SIZE)
7062 return PUD_SIZE - PMD_SIZE;
7063 #endif
7064 return 0UL;
7065 }
7066
7067 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
7068
7069 /**
7070 * folio_isolate_hugetlb - try to isolate an allocated hugetlb folio
7071 * @folio: the folio to isolate
7072 * @list: the list to add the folio to on success
7073 *
7074 * Isolate an allocated (refcount > 0) hugetlb folio, marking it as
7075 * isolated/non-migratable, and moving it from the active list to the
7076 * given list.
7077 *
7078 * Isolation will fail if @folio is not an allocated hugetlb folio, or if
7079 * it is already isolated/non-migratable.
7080 *
7081 * On success, an additional folio reference is taken that must be dropped
7082 * using folio_putback_hugetlb() to undo the isolation.
7083 *
7084 * Return: True if isolation worked, otherwise False.
7085 */
7086 bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list)
7087 {
7088 bool ret = true;
7089
7090 spin_lock_irq(&hugetlb_lock);
7091 if (!folio_test_hugetlb(folio) ||
7092 !folio_test_hugetlb_migratable(folio) ||
7093 !folio_try_get(folio)) {
7094 ret = false;
7095 goto unlock;
7096 }
7097 folio_clear_hugetlb_migratable(folio);
7098 list_move_tail(&folio->lru, list);
7099 unlock:
7100 spin_unlock_irq(&hugetlb_lock);
7101 return ret;
7102 }
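/*
 * Illustrative pairing with folio_putback_hugetlb() (a sketch; error handling
 * and the actual processing are elided, do_something() is a placeholder):
 *
 *	LIST_HEAD(isolated);
 *
 *	if (folio_isolate_hugetlb(folio, &isolated)) {
 *		do_something(folio);
 *		folio_putback_hugetlb(folio);
 *	}
 */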
7103
7104 int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
7105 {
7106 int ret = 0;
7107
7108 *hugetlb = false;
7109 spin_lock_irq(&hugetlb_lock);
7110 if (folio_test_hugetlb(folio)) {
7111 *hugetlb = true;
7112 if (folio_test_hugetlb_freed(folio))
7113 ret = 0;
7114 else if (folio_test_hugetlb_migratable(folio) || unpoison)
7115 ret = folio_try_get(folio);
7116 else
7117 ret = -EBUSY;
7118 }
7119 spin_unlock_irq(&hugetlb_lock);
7120 return ret;
7121 }
7122
7123 int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
7124 bool *migratable_cleared)
7125 {
7126 int ret;
7127
7128 spin_lock_irq(&hugetlb_lock);
7129 ret = __get_huge_page_for_hwpoison(pfn, flags, migratable_cleared);
7130 spin_unlock_irq(&hugetlb_lock);
7131 return ret;
7132 }
7133
7134 /**
7135 * folio_putback_hugetlb - unisolate a hugetlb folio
7136 * @folio: the isolated hugetlb folio
7137 *
7138 * Putback/un-isolate the hugetlb folio that was previously isolated using
7139 * folio_isolate_hugetlb(): marking it non-isolated/migratable and putting it
7140 * back onto the active list.
7141 *
7142 * Will drop the additional folio reference obtained through
7143 * folio_isolate_hugetlb().
7144 */
7145 void folio_putback_hugetlb(struct folio *folio)
7146 {
7147 spin_lock_irq(&hugetlb_lock);
7148 folio_set_hugetlb_migratable(folio);
7149 list_move_tail(&folio->lru, &(folio_hstate(folio))->hugepage_activelist);
7150 spin_unlock_irq(&hugetlb_lock);
7151 folio_put(folio);
7152 }
7153
7154 void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason)
7155 {
7156 struct hstate *h = folio_hstate(old_folio);
7157
7158 hugetlb_cgroup_migrate(old_folio, new_folio);
7159 folio_set_owner_migrate_reason(new_folio, reason);
7160
7161 /*
7162 * Transfer the temporary state of the new hugetlb folio. This is
7163 * the reverse of other transitions because the new folio is going
7164 * to be final while the old one will be freed, so the old folio
7165 * takes over the temporary status.
7166 *
7167 * Also note that we have to transfer the per-node surplus state
7168 * here as well, otherwise the global surplus count will not match
7169 * the per-node counts.
7170 */
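/*
 * For instance (illustrative): if a surplus folio on node 0 is replaced
 * by a temporary folio allocated on node 1, one unit of surplus
 * accounting is moved from node 0 to node 1 below so that the global
 * surplus count still matches the per-node totals.
 */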
7171 if (folio_test_hugetlb_temporary(new_folio)) {
7172 int old_nid = folio_nid(old_folio);
7173 int new_nid = folio_nid(new_folio);
7174
7175 folio_set_hugetlb_temporary(old_folio);
7176 folio_clear_hugetlb_temporary(new_folio);
7177
7179 /*
7180 * There is no need to transfer the per-node surplus state
7181 * when we do not cross the node.
7182 */
7183 if (new_nid == old_nid)
7184 return;
7185 spin_lock_irq(&hugetlb_lock);
7186 if (h->surplus_huge_pages_node[old_nid]) {
7187 h->surplus_huge_pages_node[old_nid]--;
7188 h->surplus_huge_pages_node[new_nid]++;
7189 }
7190 spin_unlock_irq(&hugetlb_lock);
7191 }
7192
7193 /*
7194 * Our old folio is isolated and has "migratable" cleared until it
7195 * is put back. As migration succeeded, set the new folio "migratable"
7196 * and add it to the active list.
7197 */
7198 spin_lock_irq(&hugetlb_lock);
7199 folio_set_hugetlb_migratable(new_folio);
7200 list_move_tail(&new_folio->lru, &(folio_hstate(new_folio))->hugepage_activelist);
7201 spin_unlock_irq(&hugetlb_lock);
7202 }
7203
7204 /*
7205 * If @take_locks is false, the caller must ensure that no concurrent page table
7206 * access can happen (except for gup_fast() and hardware page walks).
7207 * If @take_locks is true, we take the hugetlb VMA lock (to lock out things like
7208 * concurrent page fault handling) and the file rmap lock.
7209 */
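/*
 * For example, hugetlb_unshare_all_pmds() below passes take_locks == true,
 * whereas a caller that has already excluded concurrent page table access
 * (and holds the i_mmap rwsem in write mode, which the !take_locks branch
 * asserts) would pass false.
 */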
7210 static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
7211 unsigned long start,
7212 unsigned long end,
7213 bool take_locks)
7214 {
7215 struct hstate *h = hstate_vma(vma);
7216 unsigned long sz = huge_page_size(h);
7217 struct mm_struct *mm = vma->vm_mm;
7218 struct mmu_notifier_range range;
7219 unsigned long address;
7220 spinlock_t *ptl;
7221 pte_t *ptep;
7222
7223 if (!(vma->vm_flags & VM_MAYSHARE))
7224 return;
7225
7226 if (start >= end)
7227 return;
7228
7229 flush_cache_range(vma, start, end);
7230 /*
7231 * No need to call adjust_range_if_pmd_sharing_possible(), because
7232 * we have already done the PUD_SIZE alignment.
7233 */
7234 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
7235 start, end);
7236 mmu_notifier_invalidate_range_start(&range);
7237 if (take_locks) {
7238 hugetlb_vma_lock_write(vma);
7239 i_mmap_lock_write(vma->vm_file->f_mapping);
7240 } else {
7241 i_mmap_assert_write_locked(vma->vm_file->f_mapping);
7242 }
7243 for (address = start; address < end; address += PUD_SIZE) {
7244 ptep = hugetlb_walk(vma, address, sz);
7245 if (!ptep)
7246 continue;
7247 ptl = huge_pte_lock(h, mm, ptep);
7248 huge_pmd_unshare(mm, vma, address, ptep);
7249 spin_unlock(ptl);
7250 }
7251 flush_hugetlb_tlb_range(vma, start, end);
7252 if (take_locks) {
7253 i_mmap_unlock_write(vma->vm_file->f_mapping);
7254 hugetlb_vma_unlock_write(vma);
7255 }
7256 /*
7257 * No need to call mmu_notifier_arch_invalidate_secondary_tlbs(), see
7258 * Documentation/mm/mmu_notifier.rst.
7259 */
7260 mmu_notifier_invalidate_range_end(&range);
7261 }
7262
7263 /*
7264 * This function will unconditionally remove all the shared pmd pgtable entries
7265 * within the specific vma for a hugetlbfs memory range.
7266 */
7267 void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
7268 {
7269 hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
7270 ALIGN_DOWN(vma->vm_end, PUD_SIZE),
7271 /* take_locks = */ true);
7272 }
7273
7274 /*
7275 * For hugetlb, mremap() is an odd edge case - while the VMA copying is
7276 * performed, we permit both the old and new VMAs to reference the same
7277 * reservation.
7278 *
7279 * We fix this up after the operation succeeds, or if a newly allocated VMA
7280 * is closed as a result of a failure to allocate memory.
7281 */
7282 void fixup_hugetlb_reservations(struct vm_area_struct *vma)
7283 {
7284 if (is_vm_hugetlb_page(vma))
7285 clear_vma_resv_huge_pages(vma);
7286 }
7287