xref: /linux/include/linux/huge_mm.h (revision 00c010e130e58301db2ea0cec1eadc931e1cb8cf)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_HUGE_MM_H
3 #define _LINUX_HUGE_MM_H
4 
5 #include <linux/mm_types.h>
6 
7 #include <linux/fs.h> /* only for vma_is_dax() */
8 #include <linux/kobject.h>
9 
10 vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
11 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
12 		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
13 		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
14 void huge_pmd_set_accessed(struct vm_fault *vmf);
15 int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
16 		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
17 		  struct vm_area_struct *vma);
18 
19 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
20 void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
21 #else
22 static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
23 {
24 }
25 #endif
26 
27 vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
28 bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
29 			   pmd_t *pmd, unsigned long addr, unsigned long next);
30 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
31 		 unsigned long addr);
32 int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
33 		 unsigned long addr);
34 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
35 		   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
36 int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
37 		    pmd_t *pmd, unsigned long addr, pgprot_t newprot,
38 		    unsigned long cp_flags);
39 
40 vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
41 vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
42 vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio,
43 				bool write);
44 vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio,
45 				bool write);
46 
47 enum transparent_hugepage_flag {
48 	TRANSPARENT_HUGEPAGE_UNSUPPORTED,
49 	TRANSPARENT_HUGEPAGE_FLAG,
50 	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
51 	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
52 	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
53 	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
54 	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
55 	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
56 	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
57 };
58 
59 struct kobject;
60 struct kobj_attribute;
61 
62 ssize_t single_hugepage_flag_store(struct kobject *kobj,
63 				   struct kobj_attribute *attr,
64 				   const char *buf, size_t count,
65 				   enum transparent_hugepage_flag flag);
66 ssize_t single_hugepage_flag_show(struct kobject *kobj,
67 				  struct kobj_attribute *attr, char *buf,
68 				  enum transparent_hugepage_flag flag);
69 extern struct kobj_attribute shmem_enabled_attr;
70 extern struct kobj_attribute thpsize_shmem_enabled_attr;
71 
72 /*
73  * Mask of all large folio orders supported for anonymous THP; all orders up to
74  * and including PMD_ORDER, except order-0 (which is not "huge") and order-1
75  * (which is a limitation of the THP implementation).
76  */
77 #define THP_ORDERS_ALL_ANON	((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1)))
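/*
 * Illustrative expansion (not part of this header): assuming PMD_ORDER == 9,
 * as on x86-64 with 4KiB base pages, the expression evaluates to
 *   (BIT(10) - 1) & ~(BIT(0) | BIT(1)) == 0x3ff & ~0x3 == 0x3fc,
 * i.e. orders 2 through 9 are permitted for anonymous THP.
 */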
78 
79 /*
80  * Mask of all large folio orders supported for file THP. Folios in a DAX
81  * file are never split, and the MAX_PAGECACHE_ORDER limit does not apply to
82  * them.  The same holds for PFNMAPs, which have neither struct pages nor page cache.
83  */
84 #define THP_ORDERS_ALL_SPECIAL		\
85 	(BIT(PMD_ORDER) | BIT(PUD_ORDER))
86 #define THP_ORDERS_ALL_FILE_DEFAULT	\
87 	((BIT(MAX_PAGECACHE_ORDER + 1) - 1) & ~BIT(0))
88 
89 /*
90  * Mask of all large folio orders supported for THP.
91  */
92 #define THP_ORDERS_ALL	\
93 	(THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_SPECIAL | THP_ORDERS_ALL_FILE_DEFAULT)
94 
95 #define TVA_SMAPS		(1 << 0)	/* Will be used for procfs */
96 #define TVA_IN_PF		(1 << 1)	/* Page fault handler */
97 #define TVA_ENFORCE_SYSFS	(1 << 2)	/* Obey sysfs configuration */
98 
99 #define thp_vma_allowable_order(vma, vm_flags, tva_flags, order) \
100 	(!!thp_vma_allowable_orders(vma, vm_flags, tva_flags, BIT(order)))
101 
102 #define split_folio(f) split_folio_to_list(f, NULL)
103 
104 #ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
105 #define HPAGE_PMD_SHIFT PMD_SHIFT
106 #define HPAGE_PUD_SHIFT PUD_SHIFT
107 #else
108 #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
109 #define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
110 #endif
111 
112 #define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
113 #define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
114 #define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))
115 #define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
116 
117 #define HPAGE_PUD_ORDER (HPAGE_PUD_SHIFT-PAGE_SHIFT)
118 #define HPAGE_PUD_NR (1<<HPAGE_PUD_ORDER)
119 #define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))
120 #define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
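/*
 * Worked example (architecture dependent; shown here assuming x86-64 with
 * 4KiB pages, where PAGE_SHIFT == 12, PMD_SHIFT == 21 and PUD_SHIFT == 30):
 *   HPAGE_PMD_ORDER == 9,  HPAGE_PMD_NR == 512,    HPAGE_PMD_SIZE == 2MiB
 *   HPAGE_PUD_ORDER == 18, HPAGE_PUD_NR == 262144, HPAGE_PUD_SIZE == 1GiB
 */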
121 
122 enum mthp_stat_item {
123 	MTHP_STAT_ANON_FAULT_ALLOC,
124 	MTHP_STAT_ANON_FAULT_FALLBACK,
125 	MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
126 	MTHP_STAT_ZSWPOUT,
127 	MTHP_STAT_SWPIN,
128 	MTHP_STAT_SWPIN_FALLBACK,
129 	MTHP_STAT_SWPIN_FALLBACK_CHARGE,
130 	MTHP_STAT_SWPOUT,
131 	MTHP_STAT_SWPOUT_FALLBACK,
132 	MTHP_STAT_SHMEM_ALLOC,
133 	MTHP_STAT_SHMEM_FALLBACK,
134 	MTHP_STAT_SHMEM_FALLBACK_CHARGE,
135 	MTHP_STAT_SPLIT,
136 	MTHP_STAT_SPLIT_FAILED,
137 	MTHP_STAT_SPLIT_DEFERRED,
138 	MTHP_STAT_NR_ANON,
139 	MTHP_STAT_NR_ANON_PARTIALLY_MAPPED,
140 	__MTHP_STAT_COUNT
141 };
142 
143 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
144 struct mthp_stat {
145 	unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
146 };
147 
148 DECLARE_PER_CPU(struct mthp_stat, mthp_stats);
149 
150 static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta)
151 {
152 	if (order <= 0 || order > PMD_ORDER)
153 		return;
154 
155 	this_cpu_add(mthp_stats.stats[order][item], delta);
156 }
157 
158 static inline void count_mthp_stat(int order, enum mthp_stat_item item)
159 {
160 	mod_mthp_stat(order, item, 1);
161 }
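/*
 * Example usage (a sketch, not a caller in this header): a fault path that
 * just allocated an order-4 anonymous folio could account it with
 *
 *	count_mthp_stat(4, MTHP_STAT_ANON_FAULT_ALLOC);
 *
 * which increments this CPU's stats[4][MTHP_STAT_ANON_FAULT_ALLOC] counter.
 */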
162 
163 #else
164 static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta)
165 {
166 }
167 
168 static inline void count_mthp_stat(int order, enum mthp_stat_item item)
169 {
170 }
171 #endif
172 
173 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
174 
175 extern unsigned long transparent_hugepage_flags;
176 extern unsigned long huge_anon_orders_always;
177 extern unsigned long huge_anon_orders_madvise;
178 extern unsigned long huge_anon_orders_inherit;
179 
180 static inline bool hugepage_global_enabled(void)
181 {
182 	return transparent_hugepage_flags &
183 			((1<<TRANSPARENT_HUGEPAGE_FLAG) |
184 			(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG));
185 }
186 
187 static inline bool hugepage_global_always(void)
188 {
189 	return transparent_hugepage_flags &
190 			(1<<TRANSPARENT_HUGEPAGE_FLAG);
191 }
192 
193 static inline int highest_order(unsigned long orders)
194 {
195 	return fls_long(orders) - 1;
196 }
197 
198 static inline int next_order(unsigned long *orders, int prev)
199 {
200 	*orders &= ~BIT(prev);
201 	return highest_order(*orders);
202 }
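/*
 * Illustrative iteration (a sketch): walk a bitfield of orders from highest
 * to lowest until none remain.
 *
 *	int order = highest_order(orders);
 *
 *	while (orders) {
 *		// use 'order' here
 *		order = next_order(&orders, order);
 *	}
 *
 * For orders == BIT(9) | BIT(4) | BIT(2) this visits 9, then 4, then 2.
 */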
203 
204 /*
205  * Do the following checks:
206  *   - For file vma, check if the linear page offset of vma is
207  *     order-aligned within the file.  The hugepage is
208  *     guaranteed to be order-aligned within the file, but we must
209  *     check that the order-aligned addresses in the VMA map to
210  *     order-aligned offsets within the file, else the hugepage will
211  *     not be mappable.
212  *   - For all vmas, check if the haddr is in an aligned hugepage
213  *     area.
214  */
215 static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
216 		unsigned long addr, int order)
217 {
218 	unsigned long hpage_size = PAGE_SIZE << order;
219 	unsigned long haddr;
220 
221 	/* Don't have to check pgoff for anonymous vma */
222 	if (!vma_is_anonymous(vma)) {
223 		if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
224 				hpage_size >> PAGE_SHIFT))
225 			return false;
226 	}
227 
228 	haddr = ALIGN_DOWN(addr, hpage_size);
229 
230 	if (haddr < vma->vm_start || haddr + hpage_size > vma->vm_end)
231 		return false;
232 	return true;
233 }
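/*
 * Worked example (hypothetical values): for a file-backed VMA with
 * vm_start == 0x200000, vm_end == 0x600000, vm_pgoff == 0x200 and order == 9
 * (hpage_size == 2MiB, 512 pages), the linear page offset of the VMA start is
 * 0x200 - 0x200 == 0, which is 512-page aligned, so the pgoff check passes.
 * For addr == 0x3ff000, haddr rounds down to 0x200000; since that is
 * >= vm_start and 0x200000 + 2MiB <= vm_end, order 9 is suitable here.
 */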
234 
235 /*
236  * Filter the bitfield of input orders to the ones suitable for use in the vma.
237  * See thp_vma_suitable_order().
238  * All orders that pass the checks are returned as a bitfield.
239  */
240 static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
241 		unsigned long addr, unsigned long orders)
242 {
243 	int order;
244 
245 	/*
246 	 * Iterate over orders, highest to lowest, removing orders that don't
247 	 * meet alignment requirements from the set. Exit loop at first order
248 	 * that meets requirements, since all lower orders must also meet
249 	 * requirements.
250 	 */
251 
252 	order = highest_order(orders);
253 
254 	while (orders) {
255 		if (thp_vma_suitable_order(vma, addr, order))
256 			break;
257 		order = next_order(&orders, order);
258 	}
259 
260 	return orders;
261 }
262 
263 unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
264 					 unsigned long vm_flags,
265 					 unsigned long tva_flags,
266 					 unsigned long orders);
267 
268 /**
269  * thp_vma_allowable_orders - determine hugepage orders that are allowed for vma
270  * @vma:  the vm area to check
271  * @vm_flags: use these vm_flags instead of vma->vm_flags
272  * @tva_flags: Which TVA flags to honour
273  * @orders: bitfield of all orders to consider
274  *
275  * Calculates the intersection of the requested hugepage orders and the allowed
276  * hugepage orders for the provided vma. Permitted orders are encoded as a set
277  * bit at the corresponding bit position (bit-2 corresponds to order-2, bit-3
278  * corresponds to order-3, etc). Order-0 is never considered a hugepage order.
279  *
280  * Return: bitfield of orders allowed for hugepage in the vma. 0 if no hugepage
281  * orders are allowed.
282  */
283 static inline
284 unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
285 				       unsigned long vm_flags,
286 				       unsigned long tva_flags,
287 				       unsigned long orders)
288 {
289 	/* Optimization: check early whether any of the required orders is enabled. */
290 	if ((tva_flags & TVA_ENFORCE_SYSFS) && vma_is_anonymous(vma)) {
291 		unsigned long mask = READ_ONCE(huge_anon_orders_always);
292 
293 		if (vm_flags & VM_HUGEPAGE)
294 			mask |= READ_ONCE(huge_anon_orders_madvise);
295 		if (hugepage_global_always() ||
296 		    ((vm_flags & VM_HUGEPAGE) && hugepage_global_enabled()))
297 			mask |= READ_ONCE(huge_anon_orders_inherit);
298 
299 		orders &= mask;
300 		if (!orders)
301 			return 0;
302 	}
303 
304 	return __thp_vma_allowable_orders(vma, vm_flags, tva_flags, orders);
305 }
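/*
 * Example usage (a sketch of a caller, not part of this header): given the
 * caller's @vma and faulting address @addr, a fault path might narrow
 * THP_ORDERS_ALL_ANON down to what both sysfs and the VMA allow:
 *
 *	unsigned long orders;
 *
 *	orders = thp_vma_allowable_orders(vma, vma->vm_flags,
 *					  TVA_IN_PF | TVA_ENFORCE_SYSFS,
 *					  THP_ORDERS_ALL_ANON);
 *	orders = thp_vma_suitable_orders(vma, addr, orders);
 *	// an empty 'orders' bitfield means: fall back to a single page
 */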
306 
307 struct thpsize {
308 	struct kobject kobj;
309 	struct list_head node;
310 	int order;
311 };
312 
313 #define to_thpsize(kobj) container_of(kobj, struct thpsize, kobj)
314 
315 #define transparent_hugepage_use_zero_page()				\
316 	(transparent_hugepage_flags &					\
317 	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
318 
319 static inline bool vma_thp_disabled(struct vm_area_struct *vma,
320 		unsigned long vm_flags)
321 {
322 	/*
323 	 * THP can be disabled explicitly through madvise or prctl, and some
324 	 * architectures disable it for certain mappings, for example
325 	 * s390 KVM.
326 	 */
327 	return (vm_flags & VM_NOHUGEPAGE) ||
328 	       test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags);
329 }
330 
331 static inline bool thp_disabled_by_hw(void)
332 {
333 	/* Whether hardware/firmware has marked hugepage support as disabled. */
334 	return transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED);
335 }
336 
337 unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
338 		unsigned long len, unsigned long pgoff, unsigned long flags);
339 unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
340 		unsigned long len, unsigned long pgoff, unsigned long flags,
341 		vm_flags_t vm_flags);
342 
343 bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins);
344 int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
345 		unsigned int new_order);
346 int min_order_for_split(struct folio *folio);
347 int split_folio_to_list(struct folio *folio, struct list_head *list);
348 bool uniform_split_supported(struct folio *folio, unsigned int new_order,
349 		bool warns);
350 bool non_uniform_split_supported(struct folio *folio, unsigned int new_order,
351 		bool warns);
352 int folio_split(struct folio *folio, unsigned int new_order, struct page *page,
353 		struct list_head *list);
354 /*
355  * try_folio_split - try to split a @folio at @page using a non-uniform split.
356  * @folio: folio to be split
357  * @page: split to order-0 at the given page
358  * @list: store the after-split folios
359  *
360  * Try to split a @folio at @page using a non-uniform split to order-0; if a
361  * non-uniform split is not supported, fall back to a uniform split.
362  *
363  * Return: 0 if the split succeeded, otherwise the split failed.
364  */
365 static inline int try_folio_split(struct folio *folio, struct page *page,
366 		struct list_head *list)
367 {
368 	int ret = min_order_for_split(folio);
369 
370 	if (ret < 0)
371 		return ret;
372 
373 	if (!non_uniform_split_supported(folio, 0, false))
374 		return split_huge_page_to_list_to_order(&folio->page, list,
375 				ret);
376 	return folio_split(folio, ret, page, list);
377 }
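/*
 * Example usage (a sketch): a caller holding the folio lock that only needs
 * order-0 pages around @page could do
 *
 *	if (try_folio_split(folio, page, NULL))
 *		// split failed; the folio is still large
 *
 * and rely on the fallback to a uniform split when a non-uniform split is not
 * supported for this folio.
 */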
378 static inline int split_huge_page(struct page *page)
379 {
380 	struct folio *folio = page_folio(page);
381 	int ret = min_order_for_split(folio);
382 
383 	if (ret < 0)
384 		return ret;
385 
386 	/*
387 	 * split_huge_page() locks the page before splitting and
388 	 * expects the same page that has been split to be locked when
389 	 * returned. split_folio(page_folio(page)) cannot be used here
390 	 * because it converts the page to folio and passes the head
391 	 * page to be split.
392 	 */
393 	return split_huge_page_to_list_to_order(page, NULL, ret);
394 }
395 void deferred_split_folio(struct folio *folio, bool partially_mapped);
396 
397 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
398 		unsigned long address, bool freeze);
399 
400 #define split_huge_pmd(__vma, __pmd, __address)				\
401 	do {								\
402 		pmd_t *____pmd = (__pmd);				\
403 		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
404 					|| pmd_devmap(*____pmd))	\
405 			__split_huge_pmd(__vma, __pmd, __address,	\
406 					 false);			\
407 	}  while (0)
408 
409 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
410 		bool freeze);
411 
412 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
413 		unsigned long address);
414 
415 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
416 int change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
417 		    pud_t *pudp, unsigned long addr, pgprot_t newprot,
418 		    unsigned long cp_flags);
419 #else
420 static inline int
421 change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
422 		pud_t *pudp, unsigned long addr, pgprot_t newprot,
423 		unsigned long cp_flags) { return 0; }
424 #endif
425 
426 #define split_huge_pud(__vma, __pud, __address)				\
427 	do {								\
428 		pud_t *____pud = (__pud);				\
429 		if (pud_trans_huge(*____pud)				\
430 					|| pud_devmap(*____pud))	\
431 			__split_huge_pud(__vma, __pud, __address);	\
432 	}  while (0)
433 
434 int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
435 		     int advice);
436 int madvise_collapse(struct vm_area_struct *vma,
437 		     struct vm_area_struct **prev,
438 		     unsigned long start, unsigned long end);
439 void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
440 			   unsigned long end, struct vm_area_struct *next);
441 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
442 spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);
443 
444 static inline int is_swap_pmd(pmd_t pmd)
445 {
446 	return !pmd_none(pmd) && !pmd_present(pmd);
447 }
448 
449 /* mmap_lock must be held on entry */
450 static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
451 		struct vm_area_struct *vma)
452 {
453 	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
454 		return __pmd_trans_huge_lock(pmd, vma);
455 	else
456 		return NULL;
457 }
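/*
 * Example locking pattern (a sketch; mmap_lock must already be held):
 *
 *	spinlock_t *ptl = pmd_trans_huge_lock(pmd, vma);
 *
 *	if (ptl) {
 *		// *pmd is a huge/swap/devmap entry and is stable under ptl
 *		spin_unlock(ptl);
 *	}
 *	// a NULL return means the PMD should be handled at the PTE level
 */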
458 static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
459 		struct vm_area_struct *vma)
460 {
461 	if (pud_trans_huge(*pud) || pud_devmap(*pud))
462 		return __pud_trans_huge_lock(pud, vma);
463 	else
464 		return NULL;
465 }
466 
467 /**
468  * folio_test_pmd_mappable - Can we map this folio with a PMD?
469  * @folio: The folio to test
470  */
471 static inline bool folio_test_pmd_mappable(struct folio *folio)
472 {
473 	return folio_order(folio) >= HPAGE_PMD_ORDER;
474 }
475 
476 struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
477 		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
478 
479 vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);
480 
481 extern struct folio *huge_zero_folio;
482 extern unsigned long huge_zero_pfn;
483 
484 static inline bool is_huge_zero_folio(const struct folio *folio)
485 {
486 	return READ_ONCE(huge_zero_folio) == folio;
487 }
488 
489 static inline bool is_huge_zero_pmd(pmd_t pmd)
490 {
491 	return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
492 }
493 
494 struct folio *mm_get_huge_zero_folio(struct mm_struct *mm);
495 void mm_put_huge_zero_folio(struct mm_struct *mm);
496 
497 static inline bool thp_migration_supported(void)
498 {
499 	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
500 }
501 
502 void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
503 			   pmd_t *pmd, bool freeze);
504 bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
505 			   pmd_t *pmdp, struct folio *folio);
506 
507 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
508 
509 static inline bool folio_test_pmd_mappable(struct folio *folio)
510 {
511 	return false;
512 }
513 
514 static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
515 		unsigned long addr, int order)
516 {
517 	return false;
518 }
519 
520 static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
521 		unsigned long addr, unsigned long orders)
522 {
523 	return 0;
524 }
525 
526 static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
527 					unsigned long vm_flags,
528 					unsigned long tva_flags,
529 					unsigned long orders)
530 {
531 	return 0;
532 }
533 
534 #define transparent_hugepage_flags 0UL
535 
536 #define thp_get_unmapped_area	NULL
537 
538 static inline unsigned long
539 thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
540 			      unsigned long len, unsigned long pgoff,
541 			      unsigned long flags, vm_flags_t vm_flags)
542 {
543 	return 0;
544 }
545 
546 static inline bool
547 can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
548 {
549 	return false;
550 }
551 static inline int
552 split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
553 		unsigned int new_order)
554 {
555 	return 0;
556 }
557 static inline int split_huge_page(struct page *page)
558 {
559 	return 0;
560 }
561 
562 static inline int split_folio_to_list(struct folio *folio, struct list_head *list)
563 {
564 	return 0;
565 }
566 
567 static inline int try_folio_split(struct folio *folio, struct page *page,
568 		struct list_head *list)
569 {
570 	return 0;
571 }
572 
573 static inline void deferred_split_folio(struct folio *folio, bool partially_mapped) {}
574 #define split_huge_pmd(__vma, __pmd, __address)	\
575 	do { } while (0)
576 
577 static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
578 		unsigned long address, bool freeze) {}
579 static inline void split_huge_pmd_address(struct vm_area_struct *vma,
580 		unsigned long address, bool freeze) {}
581 static inline void split_huge_pmd_locked(struct vm_area_struct *vma,
582 					 unsigned long address, pmd_t *pmd,
583 					 bool freeze) {}
584 
585 static inline bool unmap_huge_pmd_locked(struct vm_area_struct *vma,
586 					 unsigned long addr, pmd_t *pmdp,
587 					 struct folio *folio)
588 {
589 	return false;
590 }
591 
592 #define split_huge_pud(__vma, __pmd, __address)	\
593 	do { } while (0)
594 
595 static inline int hugepage_madvise(struct vm_area_struct *vma,
596 				   unsigned long *vm_flags, int advice)
597 {
598 	return -EINVAL;
599 }
600 
601 static inline int madvise_collapse(struct vm_area_struct *vma,
602 				   struct vm_area_struct **prev,
603 				   unsigned long start, unsigned long end)
604 {
605 	return -EINVAL;
606 }
607 
608 static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
609 					 unsigned long start,
610 					 unsigned long end,
611 					 struct vm_area_struct *next)
612 {
613 }
614 static inline int is_swap_pmd(pmd_t pmd)
615 {
616 	return 0;
617 }
618 static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
619 		struct vm_area_struct *vma)
620 {
621 	return NULL;
622 }
623 static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
624 		struct vm_area_struct *vma)
625 {
626 	return NULL;
627 }
628 
629 static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
630 {
631 	return 0;
632 }
633 
634 static inline bool is_huge_zero_folio(const struct folio *folio)
635 {
636 	return false;
637 }
638 
639 static inline bool is_huge_zero_pmd(pmd_t pmd)
640 {
641 	return false;
642 }
643 
644 static inline void mm_put_huge_zero_folio(struct mm_struct *mm)
645 {
646 	return;
647 }
648 
649 static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
650 	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
651 {
652 	return NULL;
653 }
654 
655 static inline bool thp_migration_supported(void)
656 {
657 	return false;
658 }
659 
660 static inline int highest_order(unsigned long orders)
661 {
662 	return 0;
663 }
664 
665 static inline int next_order(unsigned long *orders, int prev)
666 {
667 	return 0;
668 }
669 
670 static inline void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
671 				    unsigned long address)
672 {
673 }
674 
675 static inline int change_huge_pud(struct mmu_gather *tlb,
676 				  struct vm_area_struct *vma, pud_t *pudp,
677 				  unsigned long addr, pgprot_t newprot,
678 				  unsigned long cp_flags)
679 {
680 	return 0;
681 }
682 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
683 
684 static inline int split_folio_to_list_to_order(struct folio *folio,
685 		struct list_head *list, int new_order)
686 {
687 	return split_huge_page_to_list_to_order(&folio->page, list, new_order);
688 }
689 
690 static inline int split_folio_to_order(struct folio *folio, int new_order)
691 {
692 	return split_folio_to_list_to_order(folio, NULL, new_order);
693 }
694 
695 #endif /* _LINUX_HUGE_MM_H */
696