1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_HUGETLB_H
3 #define _LINUX_HUGETLB_H
4 
5 #include <linux/mm.h>
6 #include <linux/mm_types.h>
7 #include <linux/mmdebug.h>
8 #include <linux/fs.h>
9 #include <linux/hugetlb_inline.h>
10 #include <linux/cgroup.h>
11 #include <linux/page_ref.h>
12 #include <linux/list.h>
13 #include <linux/kref.h>
14 #include <linux/pgtable.h>
15 #include <linux/gfp.h>
16 #include <linux/userfaultfd_k.h>
17 #include <linux/nodemask.h>
18 
19 struct ctl_table;
20 struct user_struct;
21 struct mmu_gather;
22 struct node;
23 
24 void free_huge_folio(struct folio *folio);
25 
26 #ifdef CONFIG_HUGETLB_PAGE
27 
28 #include <linux/pagemap.h>
29 #include <linux/shm.h>
30 #include <asm/tlbflush.h>
31 
32 /*
33  * For a HugeTLB page, there is more metadata to save in the struct page. But
34  * the head struct page cannot meet our needs, so we have to abuse other tail
35  * struct pages to store the metadata.
36  */
37 #define __NR_USED_SUBPAGE 3
38 
39 struct hugepage_subpool {
40 	spinlock_t lock;
41 	long count;
42 	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
43 	long used_hpages;	/* Used count against maximum, includes */
44 				/* both allocated and reserved pages. */
45 	struct hstate *hstate;
46 	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
47 	long rsv_hpages;	/* Pages reserved against global pool to */
48 				/* satisfy minimum size. */
49 };
50 
51 struct resv_map {
52 	struct kref refs;
53 	spinlock_t lock;
54 	struct list_head regions;
55 	long adds_in_progress;
56 	struct list_head region_cache;
57 	long region_cache_count;
58 	struct rw_semaphore rw_sema;
59 #ifdef CONFIG_CGROUP_HUGETLB
60 	/*
61 	 * On private mappings, the counter to uncharge reservations is stored
62 	 * here. If these fields are 0, then either the mapping is shared, or
63 	 * cgroup accounting is disabled for this resv_map.
64 	 */
65 	struct page_counter *reservation_counter;
66 	unsigned long pages_per_hpage;
67 	struct cgroup_subsys_state *css;
68 #endif
69 };
70 
71 /*
72  * Region tracking -- allows tracking of reservations and instantiated pages
73  *                    across the pages in a mapping.
74  *
75  * The region data structures are embedded into a resv_map and protected
76  * by a resv_map's lock.  The set of regions within the resv_map represent
77  * reservations for huge pages, or huge pages that have already been
78  * instantiated within the map.  The from and to elements are huge page
79  * indices into the associated mapping.  from indicates the starting index
80  * of the region.  to represents the first index past the end of the region.
81  *
82  * For example, a file region structure with from == 0 and to == 4 represents
83  * four huge pages in a mapping.  It is important to note that the to element
84  * represents the first element past the end of the region. This is used in
85  * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
86  *
87  * Interval notation of the form [from, to) will be used to indicate that
88  * the endpoint from is inclusive and to is exclusive.
89  */
90 struct file_region {
91 	struct list_head link;
92 	long from;
93 	long to;
94 #ifdef CONFIG_CGROUP_HUGETLB
95 	/*
96 	 * On shared mappings, each reserved region appears as a struct
97 	 * file_region in resv_map. These fields hold the info needed to
98 	 * uncharge each reservation.
99 	 */
100 	struct page_counter *reservation_counter;
101 	struct cgroup_subsys_state *css;
102 #endif
103 };
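/*
 * Illustrative sketch (not an interface defined in this header): file_region
 * entries live on a resv_map's regions list and are walked under the
 * resv_map's lock.  Since each region covers the half-open range [from, to),
 * counting the huge pages tracked by a struct resv_map *map could look
 * roughly like:
 *
 *	struct file_region *rg;
 *	long npages = 0;
 *
 *	spin_lock(&map->lock);
 *	list_for_each_entry(rg, &map->regions, link)
 *		npages += rg->to - rg->from;
 *	spin_unlock(&map->lock);
 */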
104 
105 struct hugetlb_vma_lock {
106 	struct kref refs;
107 	struct rw_semaphore rw_sema;
108 	struct vm_area_struct *vma;
109 };
110 
111 extern struct resv_map *resv_map_alloc(void);
112 void resv_map_release(struct kref *ref);
113 
114 extern spinlock_t hugetlb_lock;
115 extern int hugetlb_max_hstate __read_mostly;
116 #define for_each_hstate(h) \
117 	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
118 
119 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
120 						long min_hpages);
121 void hugepage_put_subpool(struct hugepage_subpool *spool);
122 
123 void hugetlb_dup_vma_private(struct vm_area_struct *vma);
124 void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
125 int move_hugetlb_page_tables(struct vm_area_struct *vma,
126 			     struct vm_area_struct *new_vma,
127 			     unsigned long old_addr, unsigned long new_addr,
128 			     unsigned long len);
129 int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
130 			    struct vm_area_struct *, struct vm_area_struct *);
131 void unmap_hugepage_range(struct vm_area_struct *,
132 			  unsigned long start, unsigned long end,
133 			  struct folio *, zap_flags_t);
134 void __unmap_hugepage_range(struct mmu_gather *tlb,
135 			  struct vm_area_struct *vma,
136 			  unsigned long start, unsigned long end,
137 			  struct folio *, zap_flags_t zap_flags);
138 void hugetlb_report_meminfo(struct seq_file *);
139 int hugetlb_report_node_meminfo(char *buf, int len, int nid);
140 void hugetlb_show_meminfo_node(int nid);
141 unsigned long hugetlb_total_pages(void);
142 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
143 			unsigned long address, unsigned int flags);
144 #ifdef CONFIG_USERFAULTFD
145 int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
146 			     struct vm_area_struct *dst_vma,
147 			     unsigned long dst_addr,
148 			     unsigned long src_addr,
149 			     uffd_flags_t flags,
150 			     struct folio **foliop);
151 #endif /* CONFIG_USERFAULTFD */
152 long hugetlb_reserve_pages(struct inode *inode, long from, long to,
153 			   struct vm_area_desc *desc, vm_flags_t vm_flags);
154 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
155 						long freed);
156 bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list);
157 int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison);
158 int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
159 				bool *migratable_cleared);
160 void folio_putback_hugetlb(struct folio *folio);
161 void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
162 void hugetlb_fix_reserve_counts(struct inode *inode);
163 extern struct mutex *hugetlb_fault_mutex_table;
164 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
165 
166 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
167 		      unsigned long addr, pud_t *pud);
168 bool hugetlbfs_pagecache_present(struct hstate *h,
169 				 struct vm_area_struct *vma,
170 				 unsigned long address);
171 
172 struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio);
173 
174 extern int movable_gigantic_pages __read_mostly;
175 extern int sysctl_hugetlb_shm_group __read_mostly;
176 extern struct list_head huge_boot_pages[MAX_NUMNODES];
177 
178 void hugetlb_bootmem_alloc(void);
179 extern nodemask_t hugetlb_bootmem_nodes;
180 void hugetlb_bootmem_set_nodes(void);
181 
182 /* arch callbacks */
183 
184 #ifndef CONFIG_HIGHPTE
185 /*
186  * pte_offset_huge() and pte_alloc_huge() are helpers for those architectures
187  * which may go down to the lowest PTE level in their huge_pte_offset() and
188  * huge_pte_alloc(): to avoid reliance on pte_offset_map() without pte_unmap().
189  */
190 static inline pte_t *pte_offset_huge(pmd_t *pmd, unsigned long address)
191 {
192 	return pte_offset_kernel(pmd, address);
193 }
194 static inline pte_t *pte_alloc_huge(struct mm_struct *mm, pmd_t *pmd,
195 				    unsigned long address)
196 {
197 	return pte_alloc(mm, pmd) ? NULL : pte_offset_huge(pmd, address);
198 }
199 #endif
200 
201 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
202 			unsigned long addr, unsigned long sz);
203 /*
204  * huge_pte_offset(): Walk the hugetlb pgtable until the last level PTE.
205  * Returns the pte_t* if found, or NULL if the address is not mapped.
206  *
207  * IMPORTANT: we should normally not directly call this function, instead
208  * this is only a common interface to implement arch-specific
209  * walker. Please use hugetlb_walk() instead, because that will attempt to
210  * verify the locking for you.
211  *
212  * Since this function will walk all the pgtable pages (including not only
213  * high-level pgtable page, but also PUD entry that can be unshared
214  * concurrently for VM_SHARED), the caller of this function should be
215  * responsible of its thread safety.  One can follow this rule:
216  *
217  *  (1) For private mappings: pmd unsharing is not possible, so holding the
218  *      mmap_lock for either read or write is sufficient. Most callers
219  *      already hold the mmap_lock, so normally, no special action is
220  *      required.
221  *
222  *  (2) For shared mappings: pmd unsharing is possible (so the PUD-ranged
223  *      pgtable page can go away from under us!  This can happen via a pmd
224  *      unshare with a follow-up munmap() in the other process), then we
225  *      need either:
226  *
227  *     (2.1) hugetlb vma lock read or write held, to make sure pmd unshare
228  *           won't happen within the range (it also makes sure the pte_t we
229  *           read is the right and stable one), or,
230  *
231  *     (2.2) hugetlb mapping i_mmap_rwsem lock held read or write, to make
232  *           sure that even if an unshare happened, the racy unmap() will
233  *           wait until i_mmap_rwsem is released.
234  *
235  * Option (2.1) is the safest: it guarantees pte stability from the pmd
236  * sharing point of view until the vma lock is released.  Option (2.2)
237  * doesn't protect against a concurrent pmd unshare, but it makes sure the
238  * pgtable page is safe to access.
239  */
240 pte_t *huge_pte_offset(struct mm_struct *mm,
241 		       unsigned long addr, unsigned long sz);
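/*
 * Illustrative sketch of rule (2.1) above (not a definition in this header):
 * a walk of a shared mapping would typically hold the hugetlb vma lock and
 * go through hugetlb_walk(), which also verifies the locking:
 *
 *	struct hstate *h = hstate_vma(vma);
 *
 *	hugetlb_vma_lock_read(vma);
 *	ptep = hugetlb_walk(vma, addr & huge_page_mask(h), huge_page_size(h));
 *	if (ptep)
 *		... access the huge pte (e.g. under huge_pte_lock()) ...
 *	hugetlb_vma_unlock_read(vma);
 */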
242 unsigned long hugetlb_mask_last_page(struct hstate *h);
243 int huge_pmd_unshare(struct mmu_gather *tlb, struct vm_area_struct *vma,
244 		unsigned long addr, pte_t *ptep);
245 void huge_pmd_unshare_flush(struct mmu_gather *tlb, struct vm_area_struct *vma);
246 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
247 				unsigned long *start, unsigned long *end);
248 
249 extern void __hugetlb_zap_begin(struct vm_area_struct *vma,
250 				unsigned long *begin, unsigned long *end);
251 extern void __hugetlb_zap_end(struct vm_area_struct *vma,
252 			      struct zap_details *details);
253 
254 static inline void hugetlb_zap_begin(struct vm_area_struct *vma,
255 				     unsigned long *start, unsigned long *end)
256 {
257 	if (is_vm_hugetlb_page(vma))
258 		__hugetlb_zap_begin(vma, start, end);
259 }
260 
261 static inline void hugetlb_zap_end(struct vm_area_struct *vma,
262 				   struct zap_details *details)
263 {
264 	if (is_vm_hugetlb_page(vma))
265 		__hugetlb_zap_end(vma, details);
266 }
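/*
 * Illustrative sketch (the callee name is a placeholder, not a real API):
 * zap paths bracket the actual unmap with the wrappers above so that, for a
 * hugetlb VMA, the range can be adjusted for possibly shared PMDs before the
 * unmap and any hugetlb-specific state dropped afterwards:
 *
 *	hugetlb_zap_begin(vma, &start, &end);
 *	do_the_actual_unmap(vma, start, end);
 *	hugetlb_zap_end(vma, details);
 */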
267 
268 void hugetlb_vma_lock_read(struct vm_area_struct *vma);
269 void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
270 void hugetlb_vma_lock_write(struct vm_area_struct *vma);
271 void hugetlb_vma_unlock_write(struct vm_area_struct *vma);
272 int hugetlb_vma_trylock_write(struct vm_area_struct *vma);
273 void hugetlb_vma_assert_locked(struct vm_area_struct *vma);
274 void hugetlb_vma_lock_release(struct kref *kref);
275 long hugetlb_change_protection(struct vm_area_struct *vma,
276 		unsigned long address, unsigned long end, pgprot_t newprot,
277 		unsigned long cp_flags);
278 void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
279 void fixup_hugetlb_reservations(struct vm_area_struct *vma);
280 void hugetlb_split(struct vm_area_struct *vma, unsigned long addr);
281 int hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
282 
283 unsigned int arch_hugetlb_cma_order(void);
284 
285 #else /* !CONFIG_HUGETLB_PAGE */
286 
287 static inline void hugetlb_dup_vma_private(struct vm_area_struct *vma)
288 {
289 }
290 
291 static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
292 {
293 }
294 
295 static inline unsigned long hugetlb_total_pages(void)
296 {
297 	return 0;
298 }
299 
300 static inline struct address_space *hugetlb_folio_mapping_lock_write(
301 							struct folio *folio)
302 {
303 	return NULL;
304 }
305 
306 static inline int huge_pmd_unshare(struct mmu_gather *tlb,
307 		struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
308 {
309 	return 0;
310 }
311 
312 static inline void huge_pmd_unshare_flush(struct mmu_gather *tlb,
313 		struct vm_area_struct *vma)
314 {
315 }
316 
317 static inline void adjust_range_if_pmd_sharing_possible(
318 				struct vm_area_struct *vma,
319 				unsigned long *start, unsigned long *end)
320 {
321 }
322 
323 static inline void hugetlb_zap_begin(
324 				struct vm_area_struct *vma,
325 				unsigned long *start, unsigned long *end)
326 {
327 }
328 
329 static inline void hugetlb_zap_end(
330 				struct vm_area_struct *vma,
331 				struct zap_details *details)
332 {
333 }
334 
335 static inline int copy_hugetlb_page_range(struct mm_struct *dst,
336 					  struct mm_struct *src,
337 					  struct vm_area_struct *dst_vma,
338 					  struct vm_area_struct *src_vma)
339 {
340 	BUG();
341 	return 0;
342 }
343 
344 static inline int move_hugetlb_page_tables(struct vm_area_struct *vma,
345 					   struct vm_area_struct *new_vma,
346 					   unsigned long old_addr,
347 					   unsigned long new_addr,
348 					   unsigned long len)
349 {
350 	BUG();
351 	return 0;
352 }
353 
354 static inline void hugetlb_report_meminfo(struct seq_file *m)
355 {
356 }
357 
358 static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
359 {
360 	return 0;
361 }
362 
363 static inline void hugetlb_show_meminfo_node(int nid)
364 {
365 }
366 
367 static inline void hugetlb_vma_lock_read(struct vm_area_struct *vma)
368 {
369 }
370 
371 static inline void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
372 {
373 }
374 
375 static inline void hugetlb_vma_lock_write(struct vm_area_struct *vma)
376 {
377 }
378 
379 static inline void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
380 {
381 }
382 
383 static inline int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
384 {
385 	return 1;
386 }
387 
388 static inline void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
389 {
390 }
391 
392 static inline int is_hugepage_only_range(struct mm_struct *mm,
393 					unsigned long addr, unsigned long len)
394 {
395 	return 0;
396 }
397 
398 #ifdef CONFIG_USERFAULTFD
399 static inline int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
400 					   struct vm_area_struct *dst_vma,
401 					   unsigned long dst_addr,
402 					   unsigned long src_addr,
403 					   uffd_flags_t flags,
404 					   struct folio **foliop)
405 {
406 	BUG();
407 	return 0;
408 }
409 #endif /* CONFIG_USERFAULTFD */
410 
411 static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
412 					unsigned long sz)
413 {
414 	return NULL;
415 }
416 
417 static inline bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list)
418 {
419 	return false;
420 }
421 
422 static inline int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
423 {
424 	return 0;
425 }
426 
427 static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
428 					bool *migratable_cleared)
429 {
430 	return 0;
431 }
432 
433 static inline void folio_putback_hugetlb(struct folio *folio)
434 {
435 }
436 
437 static inline void move_hugetlb_state(struct folio *old_folio,
438 					struct folio *new_folio, int reason)
439 {
440 }
441 
442 static inline long hugetlb_change_protection(
443 			struct vm_area_struct *vma, unsigned long address,
444 			unsigned long end, pgprot_t newprot,
445 			unsigned long cp_flags)
446 {
447 	return 0;
448 }
449 
450 static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
451 			struct vm_area_struct *vma, unsigned long start,
452 			unsigned long end, struct folio *folio,
453 			zap_flags_t zap_flags)
454 {
455 	BUG();
456 }
457 
458 static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
459 			struct vm_area_struct *vma, unsigned long address,
460 			unsigned int flags)
461 {
462 	BUG();
463 	return 0;
464 }
465 
466 static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }
467 
468 static inline void fixup_hugetlb_reservations(struct vm_area_struct *vma)
469 {
470 }
471 
472 static inline void hugetlb_split(struct vm_area_struct *vma, unsigned long addr) {}
473 
474 static inline int hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
475 {
476 	return 0;
477 }
478 
479 #endif /* !CONFIG_HUGETLB_PAGE */
480 
481 #ifndef pgd_write
482 static inline int pgd_write(pgd_t pgd)
483 {
484 	BUG();
485 	return 0;
486 }
487 #endif
488 
489 #define HUGETLB_ANON_FILE "anon_hugepage"
490 
491 enum {
492 	/*
493 	 * The file will be used as a shm file so shmfs accounting rules
494 	 * apply
495 	 */
496 	HUGETLB_SHMFS_INODE     = 1,
497 	/*
498 	 * The file is being created on the internal vfs mount and shmfs
499 	 * accounting rules do not apply
500 	 */
501 	HUGETLB_ANONHUGE_INODE  = 2,
502 };
503 
504 #ifdef CONFIG_HUGETLBFS
505 struct hugetlbfs_sb_info {
506 	long	max_inodes;   /* inodes allowed */
507 	long	free_inodes;  /* inodes free */
508 	spinlock_t	stat_lock;
509 	struct hstate *hstate;
510 	struct hugepage_subpool *spool;
511 	kuid_t	uid;
512 	kgid_t	gid;
513 	umode_t mode;
514 };
515 
516 static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
517 {
518 	return sb->s_fs_info;
519 }
520 
521 struct hugetlbfs_inode_info {
522 	struct inode vfs_inode;
523 	unsigned int seals;
524 };
525 
526 static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
527 {
528 	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
529 }
530 
531 extern const struct vm_operations_struct hugetlb_vm_ops;
532 struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
533 				int creat_flags, int page_size_log);
534 
535 static inline bool is_file_hugepages(const struct file *file)
536 {
537 	return file->f_op->fop_flags & FOP_HUGE_PAGES;
538 }
539 
540 static inline struct hstate *hstate_inode(struct inode *i)
541 {
542 	return HUGETLBFS_SB(i->i_sb)->hstate;
543 }
544 #else /* !CONFIG_HUGETLBFS */
545 
546 #define is_file_hugepages(file)			false
547 static inline struct file *
548 hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
549 		int creat_flags, int page_size_log)
550 {
551 	return ERR_PTR(-ENOSYS);
552 }
553 
554 static inline struct hstate *hstate_inode(struct inode *i)
555 {
556 	return NULL;
557 }
558 #endif /* !CONFIG_HUGETLBFS */
559 
560 unsigned long
561 hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
562 				    unsigned long len, unsigned long pgoff,
563 				    unsigned long flags);
564 
565 /*
566  * hugetlb page specific state flags.  These flags are located in page.private
567  * of the hugetlb head page.  Functions created via the below macros should be
568  * used to manipulate these flags.
569  *
570  * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
571  *	allocation time.  Cleared when page is fully instantiated.  Free
572  *	routine checks flag to restore a reservation on error paths.
573  *	Synchronization:  Examined or modified by code that knows it has
574  *	the only reference to the page, i.e. after allocation but before use
575  *	or when the page is being freed.
576  * HPG_migratable  - Set after a newly allocated page is added to the page
577  *	cache and/or page tables.  Indicates the page is a candidate for
578  *	migration.
579  *	Synchronization:  Initially set after new page allocation with no
580  *	locking.  When examined and modified during migration processing
581  *	(isolate, migrate, putback) the hugetlb_lock is held.
582  * HPG_temporary - Set on a page that is temporarily allocated from the buddy
583  *	allocator.  Typically used for migration target pages when no pages
584  *	are available in the pool.  The hugetlb free page path will
585  *	immediately free pages with this flag set to the buddy allocator.
586  *	Synchronization: Can be set after huge page allocation from buddy when
587  *	code knows it has only reference.  All other examinations and
588  *	code knows it has the only reference.  All other examinations and
589  * HPG_freed - Set when page is on the free lists.
590  *	Synchronization: hugetlb_lock held for examination and modification.
591  * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
592  * HPG_raw_hwp_unreliable - Set when the hugetlb page has a hwpoison sub-page
593  *	that is not tracked by the raw_hwp_page list.
594  */
595 enum hugetlb_page_flags {
596 	HPG_restore_reserve = 0,
597 	HPG_migratable,
598 	HPG_temporary,
599 	HPG_freed,
600 	HPG_vmemmap_optimized,
601 	HPG_raw_hwp_unreliable,
602 	HPG_cma,
603 	__NR_HPAGEFLAGS,
604 };
605 
606 /*
607  * Macros to create test, set and clear function definitions for
608  * hugetlb specific page flags.
609  */
610 #ifdef CONFIG_HUGETLB_PAGE
611 #define TESTHPAGEFLAG(uname, flname)				\
612 static __always_inline						\
613 bool folio_test_hugetlb_##flname(struct folio *folio)		\
614 	{	void *private = &folio->private;		\
615 		return test_bit(HPG_##flname, private);		\
616 	}
617 
618 #define SETHPAGEFLAG(uname, flname)				\
619 static __always_inline						\
620 void folio_set_hugetlb_##flname(struct folio *folio)		\
621 	{	void *private = &folio->private;		\
622 		set_bit(HPG_##flname, private);			\
623 	}
624 
625 #define CLEARHPAGEFLAG(uname, flname)				\
626 static __always_inline						\
627 void folio_clear_hugetlb_##flname(struct folio *folio)		\
628 	{	void *private = &folio->private;		\
629 		clear_bit(HPG_##flname, private);		\
630 	}
631 #else
632 #define TESTHPAGEFLAG(uname, flname)				\
633 static inline bool						\
634 folio_test_hugetlb_##flname(struct folio *folio)		\
635 	{ return 0; }
636 
637 #define SETHPAGEFLAG(uname, flname)				\
638 static inline void						\
639 folio_set_hugetlb_##flname(struct folio *folio) 		\
640 	{ }
641 
642 #define CLEARHPAGEFLAG(uname, flname)				\
643 static inline void						\
644 folio_clear_hugetlb_##flname(struct folio *folio)		\
645 	{ }
646 #endif
647 
648 #define HPAGEFLAG(uname, flname)				\
649 	TESTHPAGEFLAG(uname, flname)				\
650 	SETHPAGEFLAG(uname, flname)				\
651 	CLEARHPAGEFLAG(uname, flname)				\
652 
653 /*
654  * Create functions associated with hugetlb page flags
655  */
656 HPAGEFLAG(RestoreReserve, restore_reserve)
657 HPAGEFLAG(Migratable, migratable)
658 HPAGEFLAG(Temporary, temporary)
659 HPAGEFLAG(Freed, freed)
660 HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
661 HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)
662 HPAGEFLAG(Cma, cma)
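/*
 * A minimal sketch of what the macros above expand to: each HPAGEFLAG()
 * invocation generates a test/set/clear triple, e.g. for "migratable":
 *
 *	if (folio_test_hugetlb_migratable(folio))
 *		...;
 *	folio_set_hugetlb_migratable(folio);
 *	folio_clear_hugetlb_migratable(folio);
 */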
663 
664 #ifdef CONFIG_HUGETLB_PAGE
665 
666 #define HSTATE_NAME_LEN 32
667 /* Defines one hugetlb page size */
668 struct hstate {
669 	struct mutex resize_lock;
670 	struct lock_class_key resize_key;
671 	int next_nid_to_alloc;
672 	int next_nid_to_free;
673 	unsigned int order;
674 	unsigned int demote_order;
675 	unsigned long mask;
676 	unsigned long max_huge_pages;
677 	unsigned long nr_huge_pages;
678 	unsigned long free_huge_pages;
679 	unsigned long resv_huge_pages;
680 	unsigned long surplus_huge_pages;
681 	unsigned long nr_overcommit_huge_pages;
682 	struct list_head hugepage_activelist;
683 	struct list_head hugepage_freelists[MAX_NUMNODES];
684 	unsigned int max_huge_pages_node[MAX_NUMNODES];
685 	unsigned int nr_huge_pages_node[MAX_NUMNODES];
686 	unsigned int free_huge_pages_node[MAX_NUMNODES];
687 	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
688 	char name[HSTATE_NAME_LEN];
689 };
690 
691 struct cma;
692 
693 struct huge_bootmem_page {
694 	struct list_head list;
695 	struct hstate *hstate;
696 	unsigned long flags;
697 	struct cma *cma;
698 };
699 
700 #define HUGE_BOOTMEM_HVO		0x0001
701 #define HUGE_BOOTMEM_ZONES_VALID	0x0002
702 #define HUGE_BOOTMEM_CMA		0x0004
703 
704 bool hugetlb_bootmem_page_zones_valid(int nid, struct huge_bootmem_page *m);
705 
706 int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list);
707 int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn);
708 void wait_for_freed_hugetlb_folios(void);
709 struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
710 				unsigned long addr, bool cow_from_owner);
711 struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
712 				nodemask_t *nmask, gfp_t gfp_mask,
713 				bool allow_alloc_fallback);
714 struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
715 					  nodemask_t *nmask, gfp_t gfp_mask);
716 
717 int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
718 			pgoff_t idx);
719 void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
720 				unsigned long address, struct folio *folio);
721 
722 /* arch callback */
723 int __init __alloc_bootmem_huge_page(struct hstate *h, int nid);
724 int __init alloc_bootmem_huge_page(struct hstate *h, int nid);
725 bool __init hugetlb_node_alloc_supported(void);
726 
727 void __init hugetlb_add_hstate(unsigned order);
728 bool __init arch_hugetlb_valid_size(unsigned long size);
729 struct hstate *size_to_hstate(unsigned long size);
730 
731 #ifndef HUGE_MAX_HSTATE
732 #define HUGE_MAX_HSTATE 1
733 #endif
734 
735 extern struct hstate hstates[HUGE_MAX_HSTATE];
736 extern unsigned int default_hstate_idx;
737 
738 #define default_hstate (hstates[default_hstate_idx])
739 
740 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
741 {
742 	return HUGETLBFS_SB(inode->i_sb)->spool;
743 }
744 
745 static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
746 {
747 	return folio->_hugetlb_subpool;
748 }
749 
750 static inline void hugetlb_set_folio_subpool(struct folio *folio,
751 					struct hugepage_subpool *subpool)
752 {
753 	folio->_hugetlb_subpool = subpool;
754 }
755 
756 static inline struct hstate *hstate_file(struct file *f)
757 {
758 	return hstate_inode(file_inode(f));
759 }
760 
761 static inline struct hstate *hstate_sizelog(int page_size_log)
762 {
763 	if (!page_size_log)
764 		return &default_hstate;
765 
766 	if (page_size_log < BITS_PER_LONG)
767 		return size_to_hstate(1UL << page_size_log);
768 
769 	return NULL;
770 }
771 
772 static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
773 {
774 	return hstate_file(vma->vm_file);
775 }
776 
777 static inline unsigned long huge_page_size(const struct hstate *h)
778 {
779 	return (unsigned long)PAGE_SIZE << h->order;
780 }
781 
782 extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);
783 
784 extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);
785 
786 static inline unsigned long huge_page_mask(struct hstate *h)
787 {
788 	return h->mask;
789 }
790 
791 static inline unsigned int huge_page_order(struct hstate *h)
792 {
793 	return h->order;
794 }
795 
796 static inline unsigned huge_page_shift(struct hstate *h)
797 {
798 	return h->order + PAGE_SHIFT;
799 }
800 
801 static inline bool order_is_gigantic(unsigned int order)
802 {
803 	return order > MAX_PAGE_ORDER;
804 }
805 
806 static inline bool hstate_is_gigantic(struct hstate *h)
807 {
808 	return order_is_gigantic(huge_page_order(h));
809 }
810 
811 static inline unsigned int pages_per_huge_page(const struct hstate *h)
812 {
813 	return 1 << h->order;
814 }
815 
816 static inline unsigned int blocks_per_huge_page(struct hstate *h)
817 {
818 	return huge_page_size(h) / 512;
819 }
820 
821 static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h,
822 				struct address_space *mapping, pgoff_t idx)
823 {
824 	return filemap_lock_folio(mapping, idx << huge_page_order(h));
825 }
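/*
 * A worked example under an assumed geometry (2 MB huge pages with a 4 KB
 * base page, i.e. huge_page_order(h) == 9): the shift above converts the
 * huge-page-sized index into the base-page-sized index used by the mapping,
 * so hugetlb index 3 becomes page cache index 3 << 9 == 1536.
 */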
826 
827 #include <asm/hugetlb.h>
828 
829 #ifndef is_hugepage_only_range
830 static inline int is_hugepage_only_range(struct mm_struct *mm,
831 					unsigned long addr, unsigned long len)
832 {
833 	return 0;
834 }
835 #define is_hugepage_only_range is_hugepage_only_range
836 #endif
837 
838 #ifndef arch_clear_hugetlb_flags
839 static inline void arch_clear_hugetlb_flags(struct folio *folio) { }
840 #define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
841 #endif
842 
843 #ifndef arch_make_huge_pte
844 static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
845 				       vm_flags_t flags)
846 {
847 	return pte_mkhuge(entry);
848 }
849 #endif
850 
851 #ifndef arch_has_huge_bootmem_alloc
852 /*
853  * Some architectures do their own bootmem allocation, so they can't use
854  * early CMA allocation.
855  */
856 static inline bool arch_has_huge_bootmem_alloc(void)
857 {
858 	return false;
859 }
860 #endif
861 
862 static inline struct hstate *folio_hstate(struct folio *folio)
863 {
864 	VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
865 	return size_to_hstate(folio_size(folio));
866 }
867 
868 static inline unsigned hstate_index_to_shift(unsigned index)
869 {
870 	return hstates[index].order + PAGE_SHIFT;
871 }
872 
873 static inline int hstate_index(struct hstate *h)
874 {
875 	return h - hstates;
876 }
877 
878 int dissolve_free_hugetlb_folio(struct folio *folio);
879 int dissolve_free_hugetlb_folios(unsigned long start_pfn,
880 				    unsigned long end_pfn);
881 
882 #ifdef CONFIG_MEMORY_FAILURE
883 extern void folio_clear_hugetlb_hwpoison(struct folio *folio);
884 #else
885 static inline void folio_clear_hugetlb_hwpoison(struct folio *folio)
886 {
887 }
888 #endif
889 
890 #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
891 #ifndef arch_hugetlb_migration_supported
892 static inline bool arch_hugetlb_migration_supported(struct hstate *h)
893 {
894 	if ((huge_page_shift(h) == PMD_SHIFT) ||
895 		(huge_page_shift(h) == PUD_SHIFT) ||
896 			(huge_page_shift(h) == PGDIR_SHIFT))
897 		return true;
898 	else
899 		return false;
900 }
901 #endif
902 #else
903 static inline bool arch_hugetlb_migration_supported(struct hstate *h)
904 {
905 	return false;
906 }
907 #endif
908 
909 static inline bool hugepage_migration_supported(struct hstate *h)
910 {
911 	return arch_hugetlb_migration_supported(h);
912 }
913 
914 /*
915  * The movability check is different from the migration check.
916  * It determines whether or not a huge page should be placed in a
917  * movable zone. Movability of any huge page should be required
918  * only if the huge page size is supported for migration.
919  * There is no reason for a huge page to be movable if it is
920  * not migratable to start with. Also, the huge page should be
921  * large enough to be placed in a movable zone and still feasible
922  * to migrate. Just the presence in a movable zone does not make
923  * migration feasible.
924  *
925  * So even though large huge page sizes like the gigantic ones
926  * are migratable, they should not be movable, because it is not
927  * feasible to migrate them from a movable zone.
928  */
929 static inline bool hugepage_movable_supported(struct hstate *h)
930 {
931 	if (!hugepage_migration_supported(h))
932 		return false;
933 
934 	if (hstate_is_gigantic(h) && !movable_gigantic_pages)
935 		return false;
936 	return true;
937 }
938 
939 /* Movability of hugepages depends on migration support. */
940 static inline gfp_t htlb_alloc_mask(struct hstate *h)
941 {
942 	gfp_t gfp = __GFP_COMP | __GFP_NOWARN;
943 
944 	gfp |= hugepage_movable_supported(h) ? GFP_HIGHUSER_MOVABLE : GFP_HIGHUSER;
945 
946 	return gfp;
947 }
948 
949 static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
950 {
951 	gfp_t modified_mask = htlb_alloc_mask(h);
952 
953 	/* Some callers might want to enforce node */
954 	modified_mask |= (gfp_mask & __GFP_THISNODE);
955 
956 	modified_mask |= (gfp_mask & __GFP_NOWARN);
957 
958 	return modified_mask;
959 }
960 
961 static inline bool htlb_allow_alloc_fallback(int reason)
962 {
963 	bool allowed_fallback = false;
964 
965 	/*
966 	 * Note: memory offline, memory failure and the migration syscalls will
967 	 * be allowed to fall back to other nodes due to the lack of a better
968 	 * choice, which might break the per-node hugetlb pool. Other cases will
969 	 * set __GFP_THISNODE to avoid breaking the per-node hugetlb pool.
970 	 */
971 	switch (reason) {
972 	case MR_MEMORY_HOTPLUG:
973 	case MR_MEMORY_FAILURE:
974 	case MR_SYSCALL:
975 	case MR_MEMPOLICY_MBIND:
976 		allowed_fallback = true;
977 		break;
978 	default:
979 		break;
980 	}
981 
982 	return allowed_fallback;
983 }
984 
985 static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
986 					   struct mm_struct *mm, pte_t *pte)
987 {
988 	const unsigned long size = huge_page_size(h);
989 
990 	VM_WARN_ON(size == PAGE_SIZE);
991 
992 	/*
993 	 * hugetlb must use the exact same PT locks as core-mm page table
994 	 * walkers would. When modifying a PTE table, hugetlb must take the
995 	 * PTE PT lock, when modifying a PMD table, hugetlb must take the PMD
996 	 * PT lock etc.
997 	 *
998 	 * The expectation is that any hugetlb folio smaller than a PMD is
999 	 * always mapped into a single PTE table and that any hugetlb folio
1000 	 * smaller than a PUD (but at least as big as a PMD) is always mapped
1001 	 * into a single PMD table.
1002 	 *
1003 	 * If that does not hold for an architecture, then that architecture
1004 	 * must disable split PT locks such that all *_lockptr() functions
1005 	 * will give us the same result: the per-MM PT lock.
1006 	 *
1007 	 * Note that with e.g., CONFIG_PGTABLE_LEVELS=2 where
1008 	 * PGDIR_SIZE==P4D_SIZE==PUD_SIZE==PMD_SIZE, we'd use pud_lockptr()
1009 	 * and core-mm would use pmd_lockptr(). However, in such configurations
1010 	 * split PMD locks are disabled -- they don't make sense on a single
1011 	 * PGDIR page table -- and the end result is the same.
1012 	 */
1013 	if (size >= PUD_SIZE)
1014 		return pud_lockptr(mm, (pud_t *) pte);
1015 	else if (size >= PMD_SIZE || IS_ENABLED(CONFIG_HIGHPTE))
1016 		return pmd_lockptr(mm, (pmd_t *) pte);
1017 	/* pte_alloc_huge() only applies with !CONFIG_HIGHPTE */
1018 	return ptep_lockptr(mm, pte);
1019 }
1020 
1021 #ifndef hugepages_supported
1022 /*
1023  * Some platforms decide whether they support huge pages at boot
1024  * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
1025  * when there is no such support.
1026  */
1027 #define hugepages_supported() (HPAGE_SHIFT != 0)
1028 #endif
1029 
1030 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);
1031 
1032 static inline void hugetlb_count_init(struct mm_struct *mm)
1033 {
1034 	atomic_long_set(&mm->hugetlb_usage, 0);
1035 }
1036 
1037 static inline void hugetlb_count_add(long l, struct mm_struct *mm)
1038 {
1039 	atomic_long_add(l, &mm->hugetlb_usage);
1040 }
1041 
1042 static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
1043 {
1044 	atomic_long_sub(l, &mm->hugetlb_usage);
1045 }
1046 
1047 #ifndef huge_ptep_modify_prot_start
1048 #define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
1049 static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
1050 						unsigned long addr, pte_t *ptep)
1051 {
1052 	unsigned long psize = huge_page_size(hstate_vma(vma));
1053 
1054 	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, psize);
1055 }
1056 #endif
1057 
1058 #ifndef huge_ptep_modify_prot_commit
1059 #define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
1060 static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
1061 						unsigned long addr, pte_t *ptep,
1062 						pte_t old_pte, pte_t pte)
1063 {
1064 	unsigned long psize = huge_page_size(hstate_vma(vma));
1065 
1066 	set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
1067 }
1068 #endif
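/*
 * Illustrative use of the start/commit pair above (a sketch; huge_pte_modify()
 * is assumed to be provided by asm-generic/hugetlb.h or the architecture):
 *
 *	old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	pte = huge_pte_modify(old_pte, newprot);
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte);
 */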
1069 
1070 #ifdef CONFIG_NUMA
1071 void hugetlb_register_node(struct node *node);
1072 void hugetlb_unregister_node(struct node *node);
1073 #endif
1074 
1075 /*
1076  * Check if a given raw @page in a hugepage is HWPOISON.
1077  */
1078 bool is_raw_hwpoison_page_in_hugepage(struct page *page);
1079 
1080 static inline unsigned long huge_page_mask_align(struct file *file)
1081 {
1082 	return PAGE_MASK & ~huge_page_mask(hstate_file(file));
1083 }
1084 
1085 #else	/* CONFIG_HUGETLB_PAGE */
1086 struct hstate {};
1087 
1088 static inline unsigned long huge_page_mask_align(struct file *file)
1089 {
1090 	return 0;
1091 }
1092 
1093 static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
1094 {
1095 	return NULL;
1096 }
1097 
1098 static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h,
1099 				struct address_space *mapping, pgoff_t idx)
1100 {
1101 	return NULL;
1102 }
1103 
1104 static inline int isolate_or_dissolve_huge_folio(struct folio *folio,
1105 						struct list_head *list)
1106 {
1107 	return -ENOMEM;
1108 }
1109 
1110 static inline int replace_free_hugepage_folios(unsigned long start_pfn,
1111 		unsigned long end_pfn)
1112 {
1113 	return 0;
1114 }
1115 
1116 static inline void wait_for_freed_hugetlb_folios(void)
1117 {
1118 }
1119 
1120 static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
1121 					   unsigned long addr,
1122 					   bool cow_from_owner)
1123 {
1124 	return NULL;
1125 }
1126 
1127 static inline struct folio *
1128 alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
1129 			    nodemask_t *nmask, gfp_t gfp_mask)
1130 {
1131 	return NULL;
1132 }
1133 
1134 static inline struct folio *
1135 alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
1136 			nodemask_t *nmask, gfp_t gfp_mask,
1137 			bool allow_alloc_fallback)
1138 {
1139 	return NULL;
1140 }
1141 
1142 static inline int __alloc_bootmem_huge_page(struct hstate *h)
1143 {
1144 	return 0;
1145 }
1146 
1147 static inline struct hstate *hstate_file(struct file *f)
1148 {
1149 	return NULL;
1150 }
1151 
1152 static inline struct hstate *hstate_sizelog(int page_size_log)
1153 {
1154 	return NULL;
1155 }
1156 
1157 static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
1158 {
1159 	return NULL;
1160 }
1161 
1162 static inline struct hstate *folio_hstate(struct folio *folio)
1163 {
1164 	return NULL;
1165 }
1166 
1167 static inline struct hstate *size_to_hstate(unsigned long size)
1168 {
1169 	return NULL;
1170 }
1171 
1172 static inline unsigned long huge_page_size(struct hstate *h)
1173 {
1174 	return PAGE_SIZE;
1175 }
1176 
1177 static inline unsigned long huge_page_mask(struct hstate *h)
1178 {
1179 	return PAGE_MASK;
1180 }
1181 
1182 static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
1183 {
1184 	return PAGE_SIZE;
1185 }
1186 
1187 static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
1188 {
1189 	return PAGE_SIZE;
1190 }
1191 
1192 static inline unsigned int huge_page_order(struct hstate *h)
1193 {
1194 	return 0;
1195 }
1196 
1197 static inline unsigned int huge_page_shift(struct hstate *h)
1198 {
1199 	return PAGE_SHIFT;
1200 }
1201 
1202 static inline bool hstate_is_gigantic(struct hstate *h)
1203 {
1204 	return false;
1205 }
1206 
1207 static inline unsigned int pages_per_huge_page(struct hstate *h)
1208 {
1209 	return 1;
1210 }
1211 
1212 static inline unsigned hstate_index_to_shift(unsigned index)
1213 {
1214 	return 0;
1215 }
1216 
1217 static inline int hstate_index(struct hstate *h)
1218 {
1219 	return 0;
1220 }
1221 
1222 static inline int dissolve_free_hugetlb_folio(struct folio *folio)
1223 {
1224 	return 0;
1225 }
1226 
1227 static inline int dissolve_free_hugetlb_folios(unsigned long start_pfn,
1228 					   unsigned long end_pfn)
1229 {
1230 	return 0;
1231 }
1232 
1233 static inline bool hugepage_migration_supported(struct hstate *h)
1234 {
1235 	return false;
1236 }
1237 
1238 static inline bool hugepage_movable_supported(struct hstate *h)
1239 {
1240 	return false;
1241 }
1242 
1243 static inline gfp_t htlb_alloc_mask(struct hstate *h)
1244 {
1245 	return 0;
1246 }
1247 
1248 static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
1249 {
1250 	return 0;
1251 }
1252 
1253 static inline bool htlb_allow_alloc_fallback(int reason)
1254 {
1255 	return false;
1256 }
1257 
1258 static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
1259 					   struct mm_struct *mm, pte_t *pte)
1260 {
1261 	return &mm->page_table_lock;
1262 }
1263 
1264 static inline void hugetlb_count_init(struct mm_struct *mm)
1265 {
1266 }
1267 
1268 static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
1269 {
1270 }
1271 
1272 static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
1273 {
1274 }
1275 
1276 static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
1277 					  unsigned long addr, pte_t *ptep)
1278 {
1279 #ifdef CONFIG_MMU
1280 	return ptep_get(ptep);
1281 #else
1282 	return *ptep;
1283 #endif
1284 }
1285 
1286 static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
1287 				   pte_t *ptep, pte_t pte, unsigned long sz)
1288 {
1289 }
1290 
1291 static inline void hugetlb_register_node(struct node *node)
1292 {
1293 }
1294 
1295 static inline void hugetlb_unregister_node(struct node *node)
1296 {
1297 }
1298 
1299 static inline bool hugetlbfs_pagecache_present(
1300     struct hstate *h, struct vm_area_struct *vma, unsigned long address)
1301 {
1302 	return false;
1303 }
1304 
1305 static inline void hugetlb_bootmem_alloc(void)
1306 {
1307 }
1308 #endif	/* CONFIG_HUGETLB_PAGE */
1309 
1310 static inline spinlock_t *huge_pte_lock(struct hstate *h,
1311 					struct mm_struct *mm, pte_t *pte)
1312 {
1313 	spinlock_t *ptl;
1314 
1315 	ptl = huge_pte_lockptr(h, mm, pte);
1316 	spin_lock(ptl);
1317 	return ptl;
1318 }
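/*
 * Typical locking pattern (sketch): take the page table lock that matches
 * the mapping level, as selected by huge_pte_lockptr(), around any access
 * to the huge pte:
 *
 *	ptl = huge_pte_lock(h, mm, ptep);
 *	... read or update the pte pointed to by ptep ...
 *	spin_unlock(ptl);
 */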
1319 
1320 #if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
1321 extern void __init hugetlb_cma_reserve(void);
1322 #else
1323 static inline __init void hugetlb_cma_reserve(void)
1324 {
1325 }
1326 #endif
1327 
1328 #ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
1329 static inline bool hugetlb_pmd_shared(pte_t *pte)
1330 {
1331 	return ptdesc_pmd_is_shared(virt_to_ptdesc(pte));
1332 }
1333 #else
1334 static inline bool hugetlb_pmd_shared(pte_t *pte)
1335 {
1336 	return false;
1337 }
1338 #endif
1339 
1340 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);
1341 
1342 #ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
1343 /*
1344  * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
1345  * implement this.
1346  */
1347 #define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
1348 #endif
1349 
1350 static inline bool __vma_shareable_lock(struct vm_area_struct *vma)
1351 {
1352 	return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;
1353 }
1354 
1355 bool __vma_private_lock(struct vm_area_struct *vma);
1356 
1357 /*
1358  * Safe version of huge_pte_offset() to check the locks.  See comments
1359  * above huge_pte_offset().
1360  */
1361 static inline pte_t *
1362 hugetlb_walk(struct vm_area_struct *vma, unsigned long addr, unsigned long sz)
1363 {
1364 #if defined(CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING) && defined(CONFIG_LOCKDEP)
1365 	struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
1366 
1367 	/*
1368 	 * If pmd sharing is possible, locking is needed to safely walk the
1369 	 * hugetlb pgtables.  More information can be found in the comment
1370 	 * above huge_pte_offset() in the same file.
1371 	 *
1372 	 * NOTE: lockdep_is_held() is only defined with CONFIG_LOCKDEP.
1373 	 */
1374 	if (__vma_shareable_lock(vma))
1375 		WARN_ON_ONCE(!lockdep_is_held(&vma_lock->rw_sema) &&
1376 			     !lockdep_is_held(
1377 				 &vma->vm_file->f_mapping->i_mmap_rwsem));
1378 #endif
1379 	return huge_pte_offset(vma->vm_mm, addr, sz);
1380 }
1381 
1382 #endif /* _LINUX_HUGETLB_H */
1383