// SPDX-License-Identifier: GPL-2.0
/*
 *
 * A test for the patch "Allow compaction of unevictable pages".
 * With this patch we should be able to allocate at least 1/4
 * of RAM in huge pages. Without the patch much less is
 * allocated.
 */

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <fcntl.h>
#include <errno.h>
#include <unistd.h>
#include <string.h>

#include "../kselftest.h"

#define MAP_SIZE_MB	100
#define MAP_SIZE	(MAP_SIZE_MB * 1024 * 1024)

struct map_list {
	void *map;
	struct map_list *next;
};

/* Read MemFree and Hugepagesize (both in kB) from /proc/meminfo */
int read_memory_info(unsigned long *memfree, unsigned long *hugepagesize)
{
	char buffer[256] = {0};
	char *cmd = "cat /proc/meminfo | grep -i memfree | grep -o '[0-9]*'";
	FILE *cmdfile = popen(cmd, "r");

	if (!cmdfile) {
		ksft_print_msg("Failed to read meminfo: %s\n", strerror(errno));
		return -1;
	}

	if (!(fgets(buffer, sizeof(buffer), cmdfile))) {
		ksft_print_msg("Failed to read meminfo: %s\n", strerror(errno));
		pclose(cmdfile);
		return -1;
	}

	pclose(cmdfile);

	*memfree = atoll(buffer);
	cmd = "cat /proc/meminfo | grep -i hugepagesize | grep -o '[0-9]*'";
	cmdfile = popen(cmd, "r");

	if (!cmdfile) {
		ksft_print_msg("Failed to read meminfo: %s\n", strerror(errno));
		return -1;
	}

	if (!(fgets(buffer, sizeof(buffer), cmdfile))) {
		ksft_print_msg("Failed to read meminfo: %s\n", strerror(errno));
		pclose(cmdfile);
		return -1;
	}

	pclose(cmdfile);
	*hugepagesize = atoll(buffer);

	return 0;
}

/* The test needs /proc/sys/vm/compact_unevictable_allowed to be set to 1 */
int prereq(void)
{
	char allowed;
	int fd;

	fd = open("/proc/sys/vm/compact_unevictable_allowed",
		  O_RDONLY | O_NONBLOCK);
	if (fd < 0) {
		ksft_print_msg("Failed to open /proc/sys/vm/compact_unevictable_allowed: %s\n",
			       strerror(errno));
		return -1;
	}

	if (read(fd, &allowed, sizeof(char)) != sizeof(char)) {
		ksft_print_msg("Failed to read from /proc/sys/vm/compact_unevictable_allowed: %s\n",
			       strerror(errno));
		close(fd);
		return -1;
	}

	close(fd);
	if (allowed == '1')
		return 0;

	ksft_print_msg("Compaction isn't allowed\n");
	return -1;
}

int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
{
	int fd, ret = -1;
	int compaction_index = 0;
	char initial_nr_hugepages[10] = {0};
	char nr_hugepages[10] = {0};

	/* We want to test with 80% of available memory. Else, OOM killer comes
	 * in to play
	 */
	mem_free = mem_free * 0.8;

	fd = open("/proc/sys/vm/nr_hugepages", O_RDWR | O_NONBLOCK);
	if (fd < 0) {
		ksft_test_result_fail("Failed to open /proc/sys/vm/nr_hugepages: %s\n",
				      strerror(errno));
		return -1;
	}

	if (read(fd, initial_nr_hugepages, sizeof(initial_nr_hugepages)) <= 0) {
		ksft_test_result_fail("Failed to read from /proc/sys/vm/nr_hugepages: %s\n",
				      strerror(errno));
		goto close_fd;
	}

	/* Start with the initial condition of 0 huge pages */
	if (write(fd, "0", sizeof(char)) != sizeof(char)) {
		ksft_test_result_fail("Failed to write 0 to /proc/sys/vm/nr_hugepages: %s\n",
				      strerror(errno));
		goto close_fd;
	}

	lseek(fd, 0, SEEK_SET);

	/* Request a large number of huge pages. The Kernel will allocate
	 * as much as it can
	 */
	if (write(fd, "100000", (6*sizeof(char))) != (6*sizeof(char))) {
		ksft_test_result_fail("Failed to write 100000 to /proc/sys/vm/nr_hugepages: %s\n",
				      strerror(errno));
		goto close_fd;
	}

	lseek(fd, 0, SEEK_SET);

	if (read(fd, nr_hugepages, sizeof(nr_hugepages)) <= 0) {
		ksft_test_result_fail("Failed to re-read from /proc/sys/vm/nr_hugepages: %s\n",
				      strerror(errno));
		goto close_fd;
	}

	/* We should have been able to request at least 1/3 rd of the memory in
	 * huge pages. Guard against dividing by zero if none were allocated.
	 */
	if (atoi(nr_hugepages) == 0) {
		ksft_test_result_fail("No huge pages were allocated\n");
		goto close_fd;
	}

	compaction_index = mem_free/(atoi(nr_hugepages) * hugepage_size);

	lseek(fd, 0, SEEK_SET);

	/* Restore the initial number of huge pages */
	if (write(fd, initial_nr_hugepages, strlen(initial_nr_hugepages))
	    != strlen(initial_nr_hugepages)) {
		ksft_test_result_fail("Failed to write value to /proc/sys/vm/nr_hugepages: %s\n",
				      strerror(errno));
		goto close_fd;
	}

	if (compaction_index > 3) {
		ksft_print_msg("ERROR: Less than 1/%d of memory is available\n"
			       "as huge pages\n", compaction_index);
		ksft_test_result_fail("No of huge pages allocated = %d\n", (atoi(nr_hugepages)));
		goto close_fd;
	}

	ksft_test_result_pass("Memory compaction succeeded. No of huge pages allocated = %d\n",
			      (atoi(nr_hugepages)));
	ret = 0;

close_fd:
	close(fd);
	return ret;
}

int main(int argc, char **argv)
{
	struct rlimit lim;
	struct map_list *list = NULL, *entry;
	size_t page_size, i;
	void *map = NULL;
	unsigned long mem_free = 0;
	unsigned long hugepage_size = 0;
	long mem_fragmentable_MB = 0;

	ksft_print_header();

	/* Requires root and compact_unevictable_allowed == 1; otherwise
	 * report success and exit.
	 */
	if (prereq() || geteuid())
		return ksft_exit_pass();

	ksft_set_plan(1);

	lim.rlim_cur = RLIM_INFINITY;
	lim.rlim_max = RLIM_INFINITY;
	if (setrlimit(RLIMIT_MEMLOCK, &lim))
		ksft_exit_fail_msg("Failed to set rlimit: %s\n", strerror(errno));

	page_size = getpagesize();

	if (read_memory_info(&mem_free, &hugepage_size) != 0)
		ksft_exit_fail_msg("Failed to get meminfo\n");

	mem_fragmentable_MB = mem_free * 0.8 / 1024;

	/* Fragment 80% of free memory with locked, touched anonymous mappings
	 * of MAP_SIZE_MB each.
	 */
	while (mem_fragmentable_MB > 0) {
		map = mmap(NULL, MAP_SIZE, PROT_READ | PROT_WRITE,
			   MAP_ANONYMOUS | MAP_PRIVATE | MAP_LOCKED, -1, 0);
		if (map == MAP_FAILED)
			break;

		entry = malloc(sizeof(struct map_list));
		if (!entry) {
			munmap(map, MAP_SIZE);
			break;
		}
		entry->map = map;
		entry->next = list;
		list = entry;

		/* Write something (in this case the address of the map) to
		 * ensure that KSM can't merge the mapped pages
		 */
		for (i = 0; i < MAP_SIZE; i += page_size)
			*(unsigned long *)(map + i) = (unsigned long)map + i;

		mem_fragmentable_MB -= MAP_SIZE_MB;
	}

	/* Unmap every mapping (and free the list) so the released memory can
	 * be compacted into huge pages.
	 */
	while (list) {
		entry = list;
		list = list->next;
		munmap(entry->map, MAP_SIZE);
		free(entry);
	}

	if (check_compaction(mem_free, hugepage_size) == 0)
		return ksft_exit_pass();

	return ksft_exit_fail();
}