Lines matching defs: h — symbol cross-reference for the Linux kernel's mm/hugetlb_vmemmap.c (HugeTLB Vmemmap Optimization, HVO). The numbers below are that file's own line numbers; continuation lines that do not mention h are omitted by the listing.

13 #include <linux/pgtable.h>
14 #include <linux/moduleparam.h>
15 #include <linux/bootmem_info.h>
16 #include <linux/mmdebug.h>
17 #include <linux/pagewalk.h>
18 #include <asm/pgalloc.h>
19 #include <asm/tlbflush.h>
20 #include "hugetlb_vmemmap.h"
453 static int __hugetlb_vmemmap_restore_folio(const struct hstate *h,
469 vmemmap_end = vmemmap_start + hugetlb_vmemmap_size(h);
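
Context for lines 453-469: __hugetlb_vmemmap_restore_folio() computes the folio's slice of the vmemmap before reallocating the discarded tail struct pages. A sketch of that computation, reconstructed from mainline mm/hugetlb_vmemmap.c (exact bodies vary by kernel version; vmemmap_remap_alloc() is the file's internal remap helper):

    unsigned long vmemmap_start = (unsigned long)&folio->page, vmemmap_end;
    unsigned long vmemmap_reuse;

    /* The folio's own struct pages delimit its slice of the vmemmap. */
    vmemmap_end    = vmemmap_start + hugetlb_vmemmap_size(h);
    vmemmap_reuse  = vmemmap_start;
    vmemmap_start += HUGETLB_VMEMMAP_RESERVE_SIZE;  /* keep the head page(s) mapped */

    /* Reallocate real pages for [vmemmap_start, vmemmap_end) and remap them. */
    ret = vmemmap_remap_alloc(vmemmap_start, vmemmap_end, vmemmap_reuse, flags);
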
493 * @h: struct hstate.
499 int hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio)
501 return __hugetlb_vmemmap_restore_folio(h, folio, VMEMMAP_SYNCHRONIZE_RCU);
506 * @h: hstate.
517 long hugetlb_vmemmap_restore_folios(const struct hstate *h,
528 ret = __hugetlb_vmemmap_restore_folio(h, folio, flags);
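
Line 528 sits inside the list walk of hugetlb_vmemmap_restore_folios(). A condensed sketch of the batching idea (reconstruction, not verbatim): synchronize_rcu() is paid only once per batch by clearing VMEMMAP_SYNCHRONIZE_RCU after the first folio, and the TLB flush is deferred to the end via VMEMMAP_REMAP_NO_TLB_FLUSH:

    unsigned long flags = VMEMMAP_REMAP_NO_TLB_FLUSH | VMEMMAP_SYNCHRONIZE_RCU;
    struct folio *folio, *t_folio;
    long restored = 0, ret = 0;

    list_for_each_entry_safe(folio, t_folio, folio_list, lru) {
        if (folio_test_hugetlb_vmemmap_optimized(folio)) {
            ret = __hugetlb_vmemmap_restore_folio(h, folio, flags);
            flags &= ~VMEMMAP_SYNCHRONIZE_RCU;  /* first folio pays for all */
            if (ret)
                break;
            restored++;
        }
        /* Folios with fully populated vmemmap move to the output list. */
        list_move(&folio->lru, non_hvo_folios);
    }
    if (restored)
        flush_tlb_all();  /* one flush for the whole batch */
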
549 static bool vmemmap_should_optimize_folio(const struct hstate *h, struct folio *folio)
557 if (!hugetlb_vmemmap_optimizable(h))
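
Lines 549-557 gate the optimization. A hedged reconstruction of the whole predicate (vmemmap_optimize_enabled is the file-local toggle behind the hugetlb_free_vmemmap= boot parameter and the vm.hugetlb_optimize_vmemmap sysctl):

    static bool vmemmap_should_optimize_folio(const struct hstate *h,
                                              struct folio *folio)
    {
        if (folio_test_hugetlb_vmemmap_optimized(folio))
            return false;  /* already optimized */

        if (!READ_ONCE(vmemmap_optimize_enabled))
            return false;  /* optimization switched off */

        if (!hugetlb_vmemmap_optimizable(h))
            return false;  /* hstate too small to free any vmemmap page */

        return true;
    }
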
563 static int __hugetlb_vmemmap_optimize_folio(const struct hstate *h,
575 if (!vmemmap_should_optimize_folio(h, folio))
595 vmemmap_end = vmemmap_start + hugetlb_vmemmap_size(h);
617 * @h: struct hstate.
625 void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio)
629 __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages, VMEMMAP_SYNCHRONIZE_RCU);
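
Lines 617-629: the single-folio entry point collects the freed tail struct pages on a local list and releases them afterwards. A sketch (free_vmemmap_page_list() is the file's internal helper; details may differ by version):

    void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio)
    {
        LIST_HEAD(vmemmap_pages);

        __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages,
                                         VMEMMAP_SYNCHRONIZE_RCU);
        /* Return the now-unused tail struct pages to the buddy allocator. */
        free_vmemmap_page_list(&vmemmap_pages);
    }
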
633 static int hugetlb_vmemmap_split_folio(const struct hstate *h, struct folio *folio)
638 if (!vmemmap_should_optimize_folio(h, folio))
641 vmemmap_end = vmemmap_start + hugetlb_vmemmap_size(h);
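
Lines 633-641: the split step reuses the same range arithmetic but only breaks the PMD-mapped vmemmap down to PTE granularity, so the later remap pass needs no page-table allocations under load. A hedged reconstruction:

    static int hugetlb_vmemmap_split_folio(const struct hstate *h, struct folio *folio)
    {
        unsigned long vmemmap_start = (unsigned long)&folio->page, vmemmap_end;
        unsigned long vmemmap_reuse;

        if (!vmemmap_should_optimize_folio(h, folio))
            return 0;

        vmemmap_end    = vmemmap_start + hugetlb_vmemmap_size(h);
        vmemmap_reuse  = vmemmap_start;
        vmemmap_start += HUGETLB_VMEMMAP_RESERVE_SIZE;

        /* Split PMDs over [vmemmap_start, vmemmap_end); no remapping yet. */
        return vmemmap_remap_split(vmemmap_start, vmemmap_end, vmemmap_reuse);
    }
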
652 static void __hugetlb_vmemmap_optimize_folios(struct hstate *h,
672 epfn = spfn + pages_per_huge_page(h);
684 ret = hugetlb_vmemmap_split_folio(h, folio);
711 ret = __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages, flags);
727 __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages, flags);
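
Lines 652-727 form the batch optimizer. Lines 672-684 belong to its boot branch (on recent mainline, folios already optimized by pre-HVO get their mirrored tail struct pages write-protected rather than remapped); the common path is a two-pass loop, sketched here in condensed, non-verbatim form:

    /* Pass 1: pre-split PMDs; stop at the first -ENOMEM, remap can recover. */
    list_for_each_entry(folio, folio_list, lru) {
        if (hugetlb_vmemmap_split_folio(h, folio) == -ENOMEM)
            break;
    }
    flush_tlb_all();

    /* Pass 2: remap each folio; RCU sync once, TLB flush once at the end. */
    list_for_each_entry(folio, folio_list, lru) {
        ret = __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages, flags);
        flags &= ~VMEMMAP_SYNCHRONIZE_RCU;
        if (ret == -ENOMEM && !list_empty(&vmemmap_pages)) {
            /* Free what has accumulated so far, then retry this folio once. */
            flush_tlb_all();
            free_vmemmap_page_list(&vmemmap_pages);
            INIT_LIST_HEAD(&vmemmap_pages);
            __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages, flags);
        }
    }
    flush_tlb_all();
    free_vmemmap_page_list(&vmemmap_pages);
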
736 void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list)
738 __hugetlb_vmemmap_optimize_folios(h, folio_list, false);
741 void hugetlb_vmemmap_optimize_bootmem_folios(struct hstate *h, struct list_head *folio_list)
743 __hugetlb_vmemmap_optimize_folios(h, folio_list, true);
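
Lines 736-743: the two exported wrappers differ only in the final bool (the boot parameter of __hugetlb_vmemmap_optimize_folios()); the bootmem variant runs during early init for memblock-allocated folios, some of which may already have been handled by pre-HVO.
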
846 struct hstate *h;
860 h = m->hstate;
862 nr_pages = pages_per_huge_page(h);
881 memblock_phys_free(phys, huge_page_size(h));
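
Lines 846-881 appear to belong to the early-boot (pre-HVO) path: it walks struct huge_bootmem_page entries (m), resolves the owning hstate via m->hstate, and works in units of pages_per_huge_page(h). The memblock_phys_free(phys, huge_page_size(h)) at line 881 returns a full huge page worth of physical memory to memblock, which suggests a fallback for boot-allocated pages that cannot be kept; the enclosing function is not recoverable from these lines alone.
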
901 const struct hstate *h;
906 for_each_hstate(h) {
907 if (hugetlb_vmemmap_optimizable(h)) {
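
Lines 901-907 are from the init routine that registers the vm.hugetlb_optimize_vmemmap sysctl only when at least one hstate can actually shed vmemmap pages. A hedged reconstruction of the mainline late initcall, plus the arithmetic behind "optimizable": with 4 KiB base pages and a 64-byte struct page, a 2 MiB folio owns 512 struct pages = 32 KiB = 8 vmemmap pages, of which HVO keeps 1 and frees 7; a 1 GiB folio frees 4095 of 4096 (about 16 MiB per folio).

    static int __init hugetlb_vmemmap_init(void)
    {
        const struct hstate *h;

        /* The reserved head pages must cover every struct page field in use. */
        BUILD_BUG_ON(__NR_USED_SUBPAGE > HUGETLB_VMEMMAP_RESERVE_PAGES);

        for_each_hstate(h) {
            if (hugetlb_vmemmap_optimizable(h)) {
                register_sysctl_init("vm", hugetlb_vmemmap_sysctls);
                break;
            }
        }
        return 0;
    }
    late_initcall(hugetlb_vmemmap_init);
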