Lines Matching defs:p
46 void (*cleanup_area)(void *p, unsigned long size);
47 void (*fault)(void *p, unsigned long start, unsigned long end);
57 void (*collapse)(const char *msg, char *p, int nr_hpages,
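The hits above (file lines 46, 47 and 57) are function-pointer members rather than locals; the later hits invoke them as ops->setup_area/fault/check_huge/cleanup_area and c->collapse, which is the pattern of a khugepaged/MADV_COLLAPSE selftest driven through two small vtable-like structs, one per memory backing and one per collapse driver, so each test is written once and run for every (backing, driver) pair. A minimal sketch in C; the struct names and any member not visible in the listing are assumptions:

#include <stdbool.h>

/* Per-backing operations the tests are written against (sketch). */
struct mem_ops {
	void *(*setup_area)(int nr_hpages);			/* assumed member */
	void (*cleanup_area)(void *p, unsigned long size);	/* file line 46 */
	void (*fault)(void *p, unsigned long start, unsigned long end); /* file line 47 */
	bool (*check_huge)(void *addr, int nr_hpages);		/* assumed member */
};

/* How a collapse is driven, e.g. khugepaged vs. MADV_COLLAPSE (sketch). */
struct collapse_context {
	void (*collapse)(const char *msg, char *p, int nr_hpages,
			 struct mem_ops *ops, bool expect);	/* file line 57 */
};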
260 void *p;
262 p = mmap(BASE_ADDR, nr * hpage_pmd_size, PROT_READ | PROT_WRITE,
264 if (p != BASE_ADDR) {
265 printf("Failed to allocate VMA at %p\n", BASE_ADDR);
269 return p;
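File lines 260-269 belong to the fixed-address anonymous mapping helper. A sketch of how the visible fragments fit together; BASE_ADDR's value, the mmap flags and the failure path are not shown in the listing and are assumptions:

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

#define BASE_ADDR ((void *)(2UL << 30))			/* illustrative fixed address */
static unsigned long hpage_pmd_size = 2UL << 20;	/* illustrative PMD size (2 MiB) */

static void *alloc_mapping(int nr)
{
	void *p;

	p = mmap(BASE_ADDR, nr * hpage_pmd_size, PROT_READ | PROT_WRITE,
		 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);	/* flags assumed */
	if (p != BASE_ADDR) {
		/* Either mmap failed or the hint address was not honoured. */
		printf("Failed to allocate VMA at %p\n", BASE_ADDR);
		exit(EXIT_FAILURE);			/* failure path assumed */
	}

	return p;
}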
272 static void fill_memory(int *p, unsigned long start, unsigned long end)
277 p[i * page_size / sizeof(*p)] = i + 0xdead0000;
286 static int madvise_collapse_retry(void *p, unsigned long size)
292 ret = madvise(p, size, MADV_COLLAPSE);
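File lines 272-292 cover two helpers: fill_memory() stamps one int per page with 0xdead0000 plus the page index, and madvise_collapse_retry() wraps MADV_COLLAPSE so a transient failure can be retried. A sketch assuming a single retry on EAGAIN; the loop bounds and the retry condition are not visible in the listing:

#include <errno.h>
#include <stdbool.h>
#include <sys/mman.h>

#ifndef MADV_COLLAPSE
#define MADV_COLLAPSE 25	/* uapi value; needs a Linux 6.1+ kernel to succeed */
#endif

static unsigned long page_size = 4096;	/* illustrative; the real test queries it */

static void fill_memory(int *p, unsigned long start, unsigned long end)
{
	int i;

	/* Tag one int per page so corruption can be traced to a page index. */
	for (i = start / page_size; i < end / page_size; i++)
		p[i * page_size / sizeof(*p)] = i + 0xdead0000;
}

static int madvise_collapse_retry(void *p, unsigned long size)
{
	bool retried = false;
	int ret;

retry:
	ret = madvise(p, size, MADV_COLLAPSE);
	/* Retry once on a transient failure (condition assumed). */
	if (ret && errno == EAGAIN && !retried) {
		retried = true;
		goto retry;
	}
	return ret;
}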
306 void *p = ops->setup_area(1);
308 ops->fault(p, 0, hpage_pmd_size);
318 if (madvise_collapse_retry(p, hpage_pmd_size)) {
322 if (!ops->check_huge(p, 1)) {
326 if (madvise(p, hpage_pmd_size, MADV_HUGEPAGE)) {
331 return p;
334 static void validate_memory(int *p, unsigned long start, unsigned long end)
339 if (p[i * page_size / sizeof(*p)] != i + 0xdead0000) {
341 i, p[i * page_size / sizeof(*p)]);
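File lines 306-341: alloc_hpage() builds a known-good PMD-mapped huge page up front (populate, MADV_COLLAPSE it, verify, then re-enable MADV_HUGEPAGE so khugepaged may touch the range again), and validate_memory() is the read-back counterpart of fill_memory(). A sketch of the control flow around the visible calls, building on the sketches above; the error messages and exit paths are assumptions:

/* Builds on the mem_ops and helper sketches above. */
static void *alloc_hpage(struct mem_ops *ops)
{
	void *p = ops->setup_area(1);

	/* Touch the whole PMD range, then collapse it eagerly. */
	ops->fault(p, 0, hpage_pmd_size);
	if (madvise_collapse_retry(p, hpage_pmd_size)) {
		perror("madvise(MADV_COLLAPSE)");	/* error path assumed */
		exit(EXIT_FAILURE);
	}
	if (!ops->check_huge(p, 1)) {
		printf("Unexpected small pages\n");	/* error path assumed */
		exit(EXIT_FAILURE);
	}
	/* Leave the range eligible for khugepaged again. */
	if (madvise(p, hpage_pmd_size, MADV_HUGEPAGE)) {
		perror("madvise(MADV_HUGEPAGE)");	/* error path assumed */
		exit(EXIT_FAILURE);
	}
	return p;
}

static void validate_memory(int *p, unsigned long start, unsigned long end)
{
	int i;

	for (i = start / page_size; i < end / page_size; i++) {
		if (p[i * page_size / sizeof(*p)] != i + 0xdead0000) {
			printf("Page %d is corrupted: %#x\n",	/* message assumed */
			       i, p[i * page_size / sizeof(*p)]);
			exit(EXIT_FAILURE);
		}
	}
}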
352 static void anon_cleanup_area(void *p, unsigned long size)
354 munmap(p, size);
357 static void anon_fault(void *p, unsigned long start, unsigned long end)
359 fill_memory(p, start, end);
370 void *p;
384 p = alloc_mapping(nr_hpages);
385 fill_memory(p, 0, size);
386 write(fd, p, size);
388 munmap(p, size);
397 p = mmap(BASE_ADDR, size, PROT_READ | PROT_EXEC,
399 if (p == MAP_FAILED || p != BASE_ADDR) {
407 return p;
410 static void file_cleanup_area(void *p, unsigned long size)
412 munmap(p, size);
417 static void file_fault(void *p, unsigned long start, unsigned long end)
419 if (madvise(((char *)p) + start, end - start, MADV_POPULATE_READ)) {
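File lines 370-419 are the file-backed mem_ops. The setup hit at file line 397 maps the file PROT_READ | PROT_EXEC, so the fault hook cannot populate it by writing; it uses MADV_POPULATE_READ to pull the page-cache pages in instead. A sketch of that hook; the error handling is an assumption:

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

#ifndef MADV_POPULATE_READ
#define MADV_POPULATE_READ 22	/* uapi value; needs a Linux 5.14+ kernel */
#endif

static void file_fault(void *p, unsigned long start, unsigned long end)
{
	/* Fault the range in read-only, without dirtying the page cache. */
	if (madvise(((char *)p) + start, end - start, MADV_POPULATE_READ)) {
		perror("madvise(MADV_POPULATE_READ)");	/* error path assumed */
		exit(EXIT_FAILURE);
	}
}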
440 void *p;
452 p = mmap(BASE_ADDR, size, PROT_READ | PROT_WRITE, MAP_SHARED, finfo.fd,
454 if (p != BASE_ADDR) {
458 return p;
461 static void shmem_cleanup_area(void *p, unsigned long size)
463 munmap(p, size);
496 static void __madvise_collapse(const char *msg, char *p, int nr_hpages,
513 madvise(p, nr_hpages * hpage_pmd_size, MADV_HUGEPAGE);
514 ret = madvise_collapse_retry(p, nr_hpages * hpage_pmd_size);
517 else if (!ops->check_huge(p, expect ? nr_hpages : 0))
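File lines 496-517 are the MADV_COLLAPSE driver behind the c->collapse hook: clear any VM_NOHUGEPAGE left by earlier steps, attempt the collapse, then judge both the madvise return value and the resulting mapping state against the expectation. A condensed sketch building on the helpers above; the pass/fail reporting and any THP sysfs save/restore around the call are assumptions:

/* Builds on the sketches above; reporting helpers are assumed. */
static void __madvise_collapse(const char *msg, char *p, int nr_hpages,
			       struct mem_ops *ops, bool expect)
{
	int ret;

	printf("%s...", msg);

	/* Make the range eligible again before asking for a collapse. */
	madvise(p, nr_hpages * hpage_pmd_size, MADV_HUGEPAGE);
	ret = madvise_collapse_retry(p, nr_hpages * hpage_pmd_size);

	if (((bool)ret) == expect)
		printf("Fail\n");	/* return value contradicts the expectation */
	else if (!ops->check_huge(p, expect ? nr_hpages : 0))
		printf("Fail\n");	/* return value OK but mapping state wrong */
	else
		printf("OK\n");
}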
525 static void madvise_collapse(const char *msg, char *p, int nr_hpages,
529 if (!ops->check_huge(p, 0)) {
533 __madvise_collapse(msg, p, nr_hpages, ops, expect);
537 static bool wait_for_scan(const char *msg, char *p, int nr_hpages,
544 if (!ops->check_huge(p, 0)) {
549 madvise(p, nr_hpages * hpage_pmd_size, MADV_HUGEPAGE);
556 if (ops->check_huge(p, nr_hpages))
567 static void khugepaged_collapse(const char *msg, char *p, int nr_hpages,
570 if (wait_for_scan(msg, p, nr_hpages, ops)) {
584 ops->fault(p, 0, nr_hpages * hpage_pmd_size);
586 if (ops->check_huge(p, expect ? nr_hpages : 0))
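File lines 537-586 are the khugepaged-driven counterpart: wait_for_scan() marks the range MADV_HUGEPAGE and polls check_huge() until khugepaged has had a chance to collapse it, and khugepaged_collapse() (file lines 567-586) then faults the range back in and checks whether the collapse stuck. A sketch of the polling loop only; the poll count, the interval and any use of khugepaged's full_scans counter are assumptions:

#include <unistd.h>

/* Builds on the sketches above. */
static bool wait_for_scan(const char *msg, char *p, int nr_hpages,
			  struct mem_ops *ops)
{
	int timeout = 6;			/* number of polls, assumed */

	if (!ops->check_huge(p, 0)) {		/* sanity: must start non-huge */
		printf("Unexpected huge page\n");
		exit(EXIT_FAILURE);
	}

	madvise(p, nr_hpages * hpage_pmd_size, MADV_HUGEPAGE);

	printf("%s...", msg);
	while (timeout--) {
		if (ops->check_huge(p, nr_hpages))
			break;			/* khugepaged collapsed the range */
		printf(".");
		usleep(500000);			/* poll interval, assumed */
	}

	madvise(p, nr_hpages * hpage_pmd_size, MADV_NOHUGEPAGE);

	return timeout == -1;			/* true means we timed out */
}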
617 char *p;
622 p = alloc_mapping(1);
623 *p = 1;
625 if (check_huge_anon(p, 1, hpage_pmd_size))
632 madvise(p, page_size, MADV_DONTNEED);
634 if (check_huge_anon(p, 0, hpage_pmd_size))
638 munmap(p, hpage_pmd_size);
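File lines 617-638 form a standalone check rather than an ops-driven test: with THP enabled, a single write should fault in a whole PMD-mapped huge page, and MADV_DONTNEED on one page of it should split the PMD again. A sketch of that sequence; check_huge_anon() and any sysfs handling around it are assumed to live elsewhere in the test:

/* Builds on the sketches above; check_huge_anon() is assumed. */
bool check_huge_anon(void *addr, int nr_hpages, unsigned long hpage_size);	/* assumed signature */

static void alloc_at_fault_test(void)		/* test name assumed */
{
	char *p = alloc_mapping(1);

	*p = 1;			/* with THP enabled, one write faults in a whole PMD */
	printf("Allocate huge page on fault...");
	printf(check_huge_anon(p, 1, hpage_pmd_size) ? "OK\n" : "Fail\n");

	printf("Split huge PMD on MADV_DONTNEED...");
	madvise(p, page_size, MADV_DONTNEED);	/* zapping one page forces the PMD to split */
	printf(check_huge_anon(p, 0, hpage_pmd_size) ? "OK\n" : "Fail\n");

	munmap(p, hpage_pmd_size);
}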
643 void *p;
647 p = ops->setup_area(nr_hpages);
648 ops->fault(p, 0, size);
649 c->collapse("Collapse multiple fully populated PTE table", p, nr_hpages,
651 validate_memory(p, 0, size);
652 ops->cleanup_area(p, size);
657 void *p;
659 p = ops->setup_area(1);
660 c->collapse("Do not collapse empty PTE table", p, 1, ops, false);
661 ops->cleanup_area(p, hpage_pmd_size);
666 void *p;
668 p = ops->setup_area(1);
669 ops->fault(p, 0, page_size);
670 c->collapse("Collapse PTE table with single PTE entry present", p,
672 ops->cleanup_area(p, hpage_pmd_size);
679 void *p;
685 p = ops->setup_area(1);
694 ops->fault(p, 0, (hpage_pmd_nr - max_ptes_none - fault_nr_pages) * page_size);
695 c->collapse("Maybe collapse with max_ptes_none exceeded", p, 1,
697 validate_memory(p, 0, (hpage_pmd_nr - max_ptes_none - fault_nr_pages) * page_size);
700 ops->fault(p, 0, (hpage_pmd_nr - max_ptes_none) * page_size);
701 c->collapse("Collapse with max_ptes_none PTEs empty", p, 1, ops,
703 validate_memory(p, 0,
707 ops->cleanup_area(p, hpage_pmd_size);
713 void *p;
715 p = ops->setup_area(1);
716 ops->fault(p, 0, hpage_pmd_size);
719 if (madvise(p, page_size, MADV_PAGEOUT)) {
723 if (check_swap(p, page_size)) {
730 c->collapse("Collapse with swapping in single PTE entry", p, 1, ops,
732 validate_memory(p, 0, hpage_pmd_size);
734 ops->cleanup_area(p, hpage_pmd_size);
740 void *p;
742 p = ops->setup_area(1);
743 ops->fault(p, 0, hpage_pmd_size);
746 if (madvise(p, (max_ptes_swap + 1) * page_size, MADV_PAGEOUT)) {
750 if (check_swap(p, (max_ptes_swap + 1) * page_size)) {
757 c->collapse("Maybe collapse with max_ptes_swap exceeded", p, 1, ops,
759 validate_memory(p, 0, hpage_pmd_size);
762 ops->fault(p, 0, hpage_pmd_size);
765 if (madvise(p, max_ptes_swap * page_size, MADV_PAGEOUT)) {
769 if (check_swap(p, max_ptes_swap * page_size)) {
776 c->collapse("Collapse with max_ptes_swap pages swapped out", p,
778 validate_memory(p, 0, hpage_pmd_size);
781 ops->cleanup_area(p, hpage_pmd_size);
786 void *p;
788 p = alloc_hpage(ops);
797 madvise(p, hpage_pmd_size, MADV_NOHUGEPAGE);
799 madvise(p + page_size, hpage_pmd_size - page_size, MADV_DONTNEED);
800 if (ops->check_huge(p, 0))
806 p, 1, ops, true);
807 validate_memory(p, 0, page_size);
809 ops->cleanup_area(p, hpage_pmd_size);
814 void *p;
816 p = alloc_hpage(ops);
818 madvise(p, page_size, MADV_NOHUGEPAGE);
819 madvise(p, hpage_pmd_size, MADV_NOHUGEPAGE);
820 if (ops->check_huge(p, 0))
825 c->collapse("Collapse PTE table full of compound pages", p, 1, ops,
827 validate_memory(p, 0, hpage_pmd_size);
828 ops->cleanup_area(p, hpage_pmd_size);
833 void *p;
836 p = ops->setup_area(1);
849 p = mremap(BASE_ADDR - i * page_size,
854 if (p == MAP_FAILED) {
859 p = mremap(BASE_ADDR + 2 * hpage_pmd_size,
864 if (p == MAP_FAILED) {
871 ops->fault(p, 0, hpage_pmd_size);
872 if (!ops->check_huge(p, 1))
877 c->collapse("Collapse PTE table full of different compound pages", p, 1,
880 validate_memory(p, 0, hpage_pmd_size);
881 ops->cleanup_area(p, hpage_pmd_size);
887 void *p;
889 p = ops->setup_area(1);
892 ops->fault(p, 0, page_size);
893 if (ops->check_huge(p, 0))
904 if (ops->check_huge(p, 0))
909 ops->fault(p, page_size, 2 * page_size);
911 p, 1, ops, true);
913 validate_memory(p, 0, page_size);
914 ops->cleanup_area(p, hpage_pmd_size);
922 if (ops->check_huge(p, 0))
926 validate_memory(p, 0, page_size);
927 ops->cleanup_area(p, hpage_pmd_size);
933 void *p;
935 p = alloc_hpage(ops);
942 if (ops->check_huge(p, 1))
948 madvise(p, page_size, MADV_NOHUGEPAGE);
949 madvise(p, hpage_pmd_size, MADV_NOHUGEPAGE);
950 if (ops->check_huge(p, 0))
954 ops->fault(p, 0, page_size);
958 p, 1, ops, true);
962 validate_memory(p, 0, hpage_pmd_size);
963 ops->cleanup_area(p, hpage_pmd_size);
971 if (ops->check_huge(p, 1))
975 validate_memory(p, 0, hpage_pmd_size);
976 ops->cleanup_area(p, hpage_pmd_size);
983 void *p;
985 p = alloc_hpage(ops);
992 if (ops->check_huge(p, 1))
999 ops->fault(p, 0, (hpage_pmd_nr - max_ptes_shared - 1) * page_size);
1000 if (ops->check_huge(p, 0))
1005 c->collapse("Maybe collapse with max_ptes_shared exceeded", p,
1011 ops->fault(p, 0, (hpage_pmd_nr - max_ptes_shared) *
1013 if (ops->check_huge(p, 0))
1019 p, 1, ops, true);
1022 validate_memory(p, 0, hpage_pmd_size);
1023 ops->cleanup_area(p, hpage_pmd_size);
1031 if (ops->check_huge(p, 1))
1035 validate_memory(p, 0, hpage_pmd_size);
1036 ops->cleanup_area(p, hpage_pmd_size);
1042 void *p;
1044 p = ops->setup_area(1);
1045 ops->fault(p, 0, hpage_pmd_size);
1046 c->collapse("Collapse fully populated PTE table...", p, 1, ops, true);
1047 validate_memory(p, 0, hpage_pmd_size);
1050 __madvise_collapse("Re-collapse PMD-mapped hugepage", p, 1, ops, true);
1051 validate_memory(p, 0, hpage_pmd_size);
1052 ops->cleanup_area(p, hpage_pmd_size);
1062 void *p;
1066 p = ops->setup_area(nr_hpages);
1067 ops->fault(p, 0, size);
1070 if (wait_for_scan("Collapse and leave PMD cleared", p, nr_hpages,
1076 c->collapse("Install huge PMD from page cache", p, nr_hpages, ops,
1078 validate_memory(p, 0, size);
1079 ops->cleanup_area(p, size);
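From file line 643 onward every hit follows the same skeleton: ops->setup_area(), ops->fault() over some prefix of the range, c->collapse() with an expectation, validate_memory(), ops->cleanup_area(); the individual tests only vary how much of the PTE table is populated, swapped out, shared or already compound before the collapse is attempted. A sketch of that common shape using the fully populated case from file lines 643-652; the hugepage count and the test name are assumptions, everything else builds on the sketches above:

/* Builds on the sketches above. */
static void collapse_full(struct collapse_context *c, struct mem_ops *ops)	/* name assumed */
{
	int nr_hpages = 4;			/* count assumed */
	unsigned long size = nr_hpages * hpage_pmd_size;
	void *p;

	p = ops->setup_area(nr_hpages);		/* map a fresh, non-huge area   */
	ops->fault(p, 0, size);			/* populate every PTE           */
	c->collapse("Collapse multiple fully populated PTE table", p, nr_hpages,
		    ops, true);			/* expect the collapse to work  */
	validate_memory(p, 0, size);		/* contents must survive intact */
	ops->cleanup_area(p, size);
}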