Lines matching "memory"

1 /* SPDX-License-Identifier: GPL-2.0 */
29 * the memory controller.
37 /* Create two nested cgroups with the memory controller enabled */ in test_memcg_subtree_control()
46 if (cg_write(parent, "cgroup.subtree_control", "+memory")) in test_memcg_subtree_control()
52 if (cg_read_strstr(child, "cgroup.controllers", "memory")) in test_memcg_subtree_control()
55 /* Create two nested cgroups without enabling the memory controller */ in test_memcg_subtree_control()
70 if (!cg_read_strstr(child2, "cgroup.controllers", "memory")) in test_memcg_subtree_control()
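/*
 * A minimal sketch (not part of the listing above) of the handshake this
 * test exercises, written against the cgroup_util.h helpers the file uses
 * throughout: "memory" shows up in a child's cgroup.controllers only after
 * "+memory" is written to the parent's cgroup.subtree_control. Note that
 * cg_read_strstr() returns 0 when the needle is found.
 */
static int memory_enabled_for_child(const char *parent, const char *child)
{
        if (cg_write(parent, "cgroup.subtree_control", "+memory"))
                return -1;

        return cg_read_strstr(child, "cgroup.controllers", "memory");
}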
98 int ret = -1; in alloc_anon_50M_check()
103 return -1; in alloc_anon_50M_check()
109 current = cg_read_long(cgroup, "memory.current"); in alloc_anon_50M_check()
116 anon = cg_read_key_long(cgroup, "memory.stat", "anon "); in alloc_anon_50M_check()
132 int ret = -1; in alloc_pagecache_50M_check()
138 return -1; in alloc_pagecache_50M_check()
143 current = cg_read_long(cgroup, "memory.current"); in alloc_pagecache_50M_check()
147 file = cg_read_key_long(cgroup, "memory.stat", "file "); in alloc_pagecache_50M_check()
162 * This test creates a memory cgroup, allocates
163 * some anonymous memory and some pagecache
164 * and checks memory.current, memory.peak, and some memory.stat values.
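/*
 * Hedged sketch (helpers from this file, not test code) of the basic
 * invariant checked here: memory.peak is a high watermark over
 * memory.current, so it can never read lower than the current usage.
 */
static int peak_is_watermark(const char *memcg)
{
        long current = cg_read_long(memcg, "memory.current");
        long peak = cg_read_long(memcg, "memory.peak");

        if (current < 0 || peak < 0)
                return -1;

        return peak >= current ? 0 : -1;
}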
172 int peak_fd = -1, peak_fd2 = -1, peak_fd3 = -1, peak_fd4 = -1; in test_memcg_current_peak()
182 current = cg_read_long(memcg, "memory.current"); in test_memcg_current_peak()
186 peak = cg_read_long(memcg, "memory.peak"); in test_memcg_current_peak()
193 peak = cg_read_long(memcg, "memory.peak"); in test_memcg_current_peak()
198 * We'll open a few FDs for the same memory.peak file to exercise the free-path. in test_memcg_current_peak()
199 * We need at least three to be closed in a different order than the writes occurred, to test in test_memcg_current_peak()
200 * the linked-list handling. in test_memcg_current_peak()
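/*
 * Sketch of the fd-local reset semantics described above (helper and flag
 * names as used below; error handling trimmed): writing any non-empty
 * string to an open memory.peak fd resets the watermark as seen through
 * that fd only, while ordinary reads of the file keep reporting the
 * cgroup-wide peak.
 */
static long fd_local_peak(const char *memcg)
{
        char buf[32] = {};
        int fd = cg_open(memcg, "memory.peak", O_RDWR | O_APPEND | O_CLOEXEC);

        write(fd, "reset", 5);                  /* arm fd-local tracking */
        pread(fd, buf, sizeof(buf) - 1, 0);     /* peak since that write */
        close(fd);

        return atol(buf);
}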
202 peak_fd = cg_open(memcg, "memory.peak", O_RDWR | O_APPEND | O_CLOEXEC); in test_memcg_current_peak()
204 if (peak_fd == -1) { in test_memcg_current_peak()
211 * Before we try to use memory.peak's fd, try to figure out whether in test_memcg_current_peak()
212 * this kernel supports writing to that file in the first place. (by in test_memcg_current_peak()
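/*
 * One plausible shape for that probe (the comment is truncated in this
 * listing, so treat this as an assumption): fstat() the fd and test the
 * owner-write bit, which is only set on kernels where memory.peak accepts
 * writes. The ret/cleanup plumbing mirrors the surrounding function.
 */
        struct stat ss;

        if (fstat(peak_fd, &ss))
                goto cleanup;

        if (!(ss.st_mode & S_IWUSR)) {
                ret = KSFT_SKIP;        /* kernel predates writable memory.peak */
                goto cleanup;
        }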
223 peak_fd2 = cg_open(memcg, "memory.peak", O_RDWR | O_APPEND | O_CLOEXEC); in test_memcg_current_peak()
225 if (peak_fd2 == -1) in test_memcg_current_peak()
228 peak_fd3 = cg_open(memcg, "memory.peak", O_RDWR | O_APPEND | O_CLOEXEC); in test_memcg_current_peak()
230 if (peak_fd3 == -1) in test_memcg_current_peak()
233 /* any non-empty string resets, but make it clear */ in test_memcg_current_peak()
248 /* Make sure a completely independent read isn't affected by our FD-local reset above */ in test_memcg_current_peak()
249 peak = cg_read_long(memcg, "memory.peak"); in test_memcg_current_peak()
257 peak_fd4 = cg_open(memcg, "memory.peak", O_RDWR | O_APPEND | O_CLOEXEC); in test_memcg_current_peak()
259 if (peak_fd4 == -1) in test_memcg_current_peak()
273 peak = cg_read_long(memcg, "memory.peak"); in test_memcg_current_peak()
277 /* Make sure everything is back to normal */ in test_memcg_current_peak()
316 return -1; in alloc_pagecache_50M_noexit()
333 return -1; in alloc_anon_noexit()
354 for (limit = 10; limit > 0; limit--) { in cg_test_proc_killed()
360 return -1; in cg_test_proc_killed()
367 * A memory.min = 0, memory.max = 200M
368 * A/B memory.min = 50M
369 * A/B/C memory.min = 75M, memory.current = 50M
370 * A/B/D memory.min = 25M, memory.current = 50M
371 * A/B/E memory.min = 0, memory.current = 50M
372 * A/B/F memory.min = 500M, memory.current = 0
374 * (or memory.low if we test soft protection)
379 * memory pressure in A.
381 * Then it checks the actual memory usage and expects that:
382 * A/B memory.current ~= 50M
383 * A/B/C memory.current ~= 29M
384 * A/B/D memory.current ~= 21M
385 * A/B/E memory.current ~= 0
386 * A/B/F memory.current = 0
389 * After that it tries to allocate more than the amount of
390 * unprotected memory available in A, and checks that:
391 * a) memory.min protects pagecache even in this case,
392 * b) memory.low allows reclaiming page cache with low events.
394 * Then we try to reclaim from A/B/C using memory.reclaim until its
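/*
 * Sketch of how the protections tabulated above get configured (cgroup
 * paths are illustrative; "attribute" is memory.min or memory.low, exactly
 * as selected in the function below):
 */
static int setup_protection_tree(const char *A, const char *B,
                                 const char *C, const char *attribute)
{
        if (cg_write(A, "memory.max", "200M"))
                return -1;
        if (cg_write(B, attribute, "50M"))
                return -1;

        return cg_write(C, attribute, "75M");
}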
405 const char *attribute = min ? "memory.min" : "memory.low"; in test_memcg_protection()
431 /* No memory.min on older kernels is fine */ in test_memcg_protection()
437 if (cg_write(parent[0], "cgroup.subtree_control", "+memory")) in test_memcg_protection()
440 if (cg_write(parent[0], "memory.max", "200M")) in test_memcg_protection()
443 if (cg_write(parent[0], "memory.swap.max", "0")) in test_memcg_protection()
449 if (cg_write(parent[1], "cgroup.subtree_control", "+memory")) in test_memcg_protection()
482 while (!values_close(cg_read_long(parent[1], "memory.current"), in test_memcg_protection()
492 if (!values_close(cg_read_long(parent[1], "memory.current"), MB(50), 3)) in test_memcg_protection()
496 c[i] = cg_read_long(children[i], "memory.current"); in test_memcg_protection()
512 "memory.low prevents from allocating anon memory\n"); in test_memcg_protection()
517 if (!values_close(cg_read_long(parent[1], "memory.current"), current, 3)) in test_memcg_protection()
532 oom = cg_read_key_long(children[i], "memory.events", "oom "); in test_memcg_protection()
533 low = cg_read_key_long(children[i], "memory.events", "low "); in test_memcg_protection()
547 for (i = ARRAY_SIZE(children) - 1; i >= 0; i--) { in test_memcg_protection()
555 for (i = ARRAY_SIZE(parent) - 1; i >= 0; i--) { in test_memcg_protection()
579 int ret = -1; in alloc_pagecache_max_30M()
583 high = cg_read_long(cgroup, "memory.high"); in alloc_pagecache_max_30M()
584 max = cg_read_long(cgroup, "memory.max"); in alloc_pagecache_max_30M()
586 return -1; in alloc_pagecache_max_30M()
590 return -1; in alloc_pagecache_max_30M()
595 current = cg_read_long(cgroup, "memory.current"); in alloc_pagecache_max_30M()
608 * This test checks that memory.high limits the amount of
609 * memory which can be consumed by either anonymous memory
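/*
 * The assertion in miniature (helpers from this file; the allocation step
 * is elided): with memory.high = 30M a larger allocation is not refused,
 * usage is instead reclaimed back to roughly the high mark, and each
 * breach bumps the "high" counter in memory.events.
 */
static int high_throttled_usage(const char *memcg)
{
        /* ... allocate well over 30M of pagecache in memcg here ... */

        if (!values_close(cg_read_long(memcg, "memory.current"), MB(30), 10))
                return -1;

        return cg_read_key_long(memcg, "memory.events", "high ") > 0 ? 0 : -1;
}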
625 if (cg_read_strcmp(memcg, "memory.high", "max\n")) in test_memcg_high()
628 if (cg_write(memcg, "memory.swap.max", "0")) in test_memcg_high()
631 if (cg_write(memcg, "memory.high", "30M")) in test_memcg_high()
643 high = cg_read_key_long(memcg, "memory.events", "high "); in test_memcg_high()
664 return -1; in alloc_anon_mlock()
672 * This test checks that memory.high is able to throttle big single shot
677 int ret = KSFT_FAIL, pid, fd = -1; in test_memcg_high_sync()
689 pre_high = cg_read_key_long(memcg, "memory.events", "high "); in test_memcg_high_sync()
690 pre_max = cg_read_key_long(memcg, "memory.events", "max "); in test_memcg_high_sync()
694 if (cg_write(memcg, "memory.swap.max", "0")) in test_memcg_high_sync()
697 if (cg_write(memcg, "memory.high", "30M")) in test_memcg_high_sync()
700 if (cg_write(memcg, "memory.max", "140M")) in test_memcg_high_sync()
713 post_high = cg_read_key_long(memcg, "memory.events", "high "); in test_memcg_high_sync()
714 post_max = cg_read_key_long(memcg, "memory.events", "max "); in test_memcg_high_sync()
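/*
 * Shape of the pre/post comparison above, pulled into a helper for
 * clarity (illustrative; the file does this inline): a throttled
 * single-shot allocation must surface as "high" events while leaving
 * the "max" counter untouched.
 */
static bool throttled_via_high(const char *memcg, long pre_high, long pre_max)
{
        long post_high = cg_read_key_long(memcg, "memory.events", "high ");
        long post_max = cg_read_key_long(memcg, "memory.events", "max ");

        return post_high > pre_high && post_max == pre_max;
}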
733 * This test checks that memory.max limits the amount of
734 * memory which can be consumed by either anonymous memory
750 if (cg_read_strcmp(memcg, "memory.max", "max\n")) in test_memcg_max()
753 if (cg_write(memcg, "memory.swap.max", "0")) in test_memcg_max()
756 if (cg_write(memcg, "memory.max", "30M")) in test_memcg_max()
766 current = cg_read_long(memcg, "memory.current"); in test_memcg_max()
770 max = cg_read_key_long(memcg, "memory.events", "max "); in test_memcg_max()
784 * Reclaim from @memcg until usage reaches @goal by writing to
785 * memory.reclaim.
790 * This function assumes that writing to memory.reclaim is the only
791 * source of change in memory.current (no concurrent allocations or
794 * This function makes sure memory.reclaim is sane. It will return
795 * false if memory.reclaim's error codes do not make sense, even if
805 for (retries = 5; retries > 0; retries--) { in reclaim_until()
806 current = cg_read_long(memcg, "memory.current"); in reclaim_until()
810 /* Did memory.reclaim return 0 incorrectly? */ in reclaim_until()
814 to_reclaim = current - goal; in reclaim_until()
816 err = cg_write(memcg, "memory.reclaim", buf); in reclaim_until()
819 else if (err != -EAGAIN) in reclaim_until()
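/*
 * Typical call site for reclaim_until() (values illustrative): drive
 * usage down to ~30M and treat failure to converge, or inconsistent
 * memory.reclaim error codes, as a test failure.
 */
        if (!reclaim_until(memcg, MB(30)))
                goto cleanup;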
826 * This test checks that memory.reclaim reclaims the given
827 * amount of memory (from both anon and file, if possible).
832 int fd = -1; in test_memcg_reclaim()
844 current = cg_read_long(memcg, "memory.current"); in test_memcg_reclaim()
855 * If swap is enabled, try to reclaim from both anon and file, else try in test_memcg_reclaim()
856 * to reclaim from file only. in test_memcg_reclaim()
869 while (!values_close(cg_read_long(memcg, "memory.current"), in test_memcg_reclaim()
871 if (retries--) { in test_memcg_reclaim()
876 "failed to allocate %ld for memcg reclaim test\n", in test_memcg_reclaim()
904 int ret = -1; in alloc_anon_50M_check_swap()
909 return -1; in alloc_anon_50M_check_swap()
915 mem_current = cg_read_long(cgroup, "memory.current"); in alloc_anon_50M_check_swap()
919 swap_current = cg_read_long(cgroup, "memory.swap.current"); in alloc_anon_50M_check_swap()
931 * This test checks that memory.swap.max limits the amount of
932 * anonymous memory which can be swapped out. Additionally, it verifies that
933 * memory.swap.peak reflects the high watermark and can be reset.
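/*
 * Sketch of the split accounting involved (tolerances are loose, in the
 * spirit of the test's own values_close() checks): with memory.max and
 * memory.swap.max both at 30M, a 50M anon allocation spreads across RAM
 * and swap, and memory.swap.peak tracks the swap high watermark.
 */
static int swap_split_ok(const char *memcg)
{
        long mem = cg_read_long(memcg, "memory.current");
        long swap = cg_read_long(memcg, "memory.swap.current");

        if (!values_close(mem + swap, MB(50), 10))
                return -1;

        return cg_read_long(memcg, "memory.swap.peak") >= swap ? 0 : -1;
}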
941 int swap_peak_fd = -1, mem_peak_fd = -1; in test_memcg_swap_max_peak()
943 /* any non-empty string resets */ in test_memcg_swap_max_peak()
956 if (cg_read_long(memcg, "memory.swap.current")) { in test_memcg_swap_max_peak()
961 swap_peak_fd = cg_open(memcg, "memory.swap.peak", in test_memcg_swap_max_peak()
964 if (swap_peak_fd == -1) { in test_memcg_swap_max_peak()
971 * Before we try to use memory.swap.peak's fd, try to figure out in test_memcg_swap_max_peak()
972 * whether this kernel supports writing to that file in the first in test_memcg_swap_max_peak()
983 mem_peak_fd = cg_open(memcg, "memory.peak", O_RDWR | O_APPEND | O_CLOEXEC); in test_memcg_swap_max_peak()
985 if (mem_peak_fd == -1) in test_memcg_swap_max_peak()
988 if (cg_read_long(memcg, "memory.swap.peak")) in test_memcg_swap_max_peak()
994 /* switch the swap and mem fds into local-peak tracking mode */ in test_memcg_swap_max_peak()
1003 if (cg_read_long(memcg, "memory.peak")) in test_memcg_swap_max_peak()
1016 if (cg_read_strcmp(memcg, "memory.max", "max\n")) in test_memcg_swap_max_peak()
1019 if (cg_read_strcmp(memcg, "memory.swap.max", "max\n")) in test_memcg_swap_max_peak()
1022 if (cg_write(memcg, "memory.swap.max", "30M")) in test_memcg_swap_max_peak()
1025 if (cg_write(memcg, "memory.max", "30M")) in test_memcg_swap_max_peak()
1032 if (cg_read_key_long(memcg, "memory.events", "oom ") != 1) in test_memcg_swap_max_peak()
1035 if (cg_read_key_long(memcg, "memory.events", "oom_kill ") != 1) in test_memcg_swap_max_peak()
1038 peak = cg_read_long(memcg, "memory.peak"); in test_memcg_swap_max_peak()
1042 peak = cg_read_long(memcg, "memory.swap.peak"); in test_memcg_swap_max_peak()
1055 * open, reset, and close the peak swap on another FD to make sure in test_memcg_swap_max_peak()
1056 * multiple extant fds don't corrupt the linked list in test_memcg_swap_max_peak()
1058 peak_reset = cg_write(memcg, "memory.swap.peak", (char *)reset_string); in test_memcg_swap_max_peak()
1062 peak_reset = cg_write(memcg, "memory.peak", (char *)reset_string); in test_memcg_swap_max_peak()
1081 * with the open FD accounted to it. in test_memcg_swap_max_peak()
1087 if (cg_read_long(memcg, "memory.peak") < MB(29)) in test_memcg_swap_max_peak()
1090 if (cg_read_long(memcg, "memory.swap.peak") < MB(29)) in test_memcg_swap_max_peak()
1096 max = cg_read_key_long(memcg, "memory.events", "max "); in test_memcg_swap_max_peak()
1100 peak = cg_read_long(memcg, "memory.peak"); in test_memcg_swap_max_peak()
1104 peak = cg_read_long(memcg, "memory.swap.peak"); in test_memcg_swap_max_peak()
1119 if (mem_peak_fd != -1 && close(mem_peak_fd)) in test_memcg_swap_max_peak()
1121 if (swap_peak_fd != -1 && close(swap_peak_fd)) in test_memcg_swap_max_peak()
1130 * This test disables swapping and tries to allocate anonymous memory
1131 * up to OOM. Then it checks for oom and oom_kill events in
1132 * memory.events.
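/*
 * The bookkeeping this test checks, as a helper (illustrative; the file
 * does these reads inline): exactly one "oom" event for hitting the limit
 * and one "oom_kill" event for the killed victim.
 */
static int saw_single_oom_kill(const char *memcg)
{
        if (cg_read_key_long(memcg, "memory.events", "oom ") != 1)
                return -1;

        return cg_read_key_long(memcg, "memory.events", "oom_kill ") == 1 ? 0 : -1;
}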
1146 if (cg_write(memcg, "memory.max", "30M")) in test_memcg_oom_events()
1149 if (cg_write(memcg, "memory.swap.max", "0")) in test_memcg_oom_events()
1158 if (cg_read_key_long(memcg, "memory.events", "oom ") != 1) in test_memcg_oom_events()
1161 if (cg_read_key_long(memcg, "memory.events", "oom_kill ") != 1) in test_memcg_oom_events()
1183 int sk, client_sk, ctl_fd, yes = 1, ret = -1; in tcp_server()
1185 close(srv_args->ctl[0]); in tcp_server()
1186 ctl_fd = srv_args->ctl[1]; in tcp_server()
1190 saddr.sin6_port = htons(srv_args->port); in tcp_server()
1209 ret = -1; in tcp_server()
1217 ret = -1; in tcp_server()
1244 allocated = cg_read_long(cgroup, "memory.current"); in tcp_client()
1250 sk = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol); in tcp_client()
1254 ret = connect(sk, ai->ai_addr, ai->ai_addrlen); in tcp_client()
1259 while (retries--) { in tcp_client()
1266 current = cg_read_long(cgroup, "memory.current"); in tcp_client()
1267 sock = cg_read_key_long(cgroup, "memory.stat", "sock "); in tcp_client()
1272 /* exclude the memory not related to the socket connection */ in tcp_client()
1273 if (values_close(current - allocated, sock, 10)) { in tcp_client()
1287 * This test checks socket memory accounting.
1289 * and 61000. Once it gets a client connection, it starts writing to
1292 * memory.current and memory.stat.sock are similar.
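/*
 * Core of the comparison tcp_client() makes above (hedged sketch;
 * "baseline" is memory.current sampled before the transfer): the growth
 * of memory.current while data streams should roughly equal the "sock "
 * key of memory.stat.
 */
static bool sock_accounting_matches(const char *memcg, long baseline)
{
        long current = cg_read_long(memcg, "memory.current");
        long sock = cg_read_key_long(memcg, "memory.stat", "sock ");

        return current > 0 && sock > 0 &&
               values_close(current - baseline, sock, 10);
}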
1307 while (bind_retries--) { in test_memcg_sock()
1344 if (cg_read_long(memcg, "memory.current") < 0) in test_memcg_sock()
1347 if (cg_read_key_long(memcg, "memory.stat", "sock ")) in test_memcg_sock()
1360 * This test disables swapping and tries to allocate anonymous memory
1361 * up to OOM with memory.oom.group set. Then it checks that all
1363 * were propagated to the parent level.
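/*
 * The knob under test: memory.oom.group makes a cgroup a single OOM
 * domain, so one OOM kill takes out every process in it. Setup sketch
 * matching the writes used below:
 */
static int make_oom_domain(const char *cg)
{
        if (cg_write(cg, "memory.max", "50M"))
                return -1;
        if (cg_write(cg, "memory.swap.max", "0"))
                return -1;

        return cg_write(cg, "memory.oom.group", "1");
}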
1383 if (cg_write(parent, "cgroup.subtree_control", "+memory")) in test_memcg_oom_group_leaf_events()
1386 if (cg_write(child, "memory.max", "50M")) in test_memcg_oom_group_leaf_events()
1389 if (cg_write(child, "memory.swap.max", "0")) in test_memcg_oom_group_leaf_events()
1392 if (cg_write(child, "memory.oom.group", "1")) in test_memcg_oom_group_leaf_events()
1404 if (cg_read_key_long(child, "memory.events", "oom_kill ") <= 0) in test_memcg_oom_group_leaf_events()
1408 parent, "memory.events", "oom_kill "); in test_memcg_oom_group_leaf_events()
1433 * This test disables swapping and tries to allocate anonymous memory
1434 * up to OOM with memory.oom.group set. Then it checks that all
1454 if (cg_write(parent, "memory.max", "80M")) in test_memcg_oom_group_parent_events()
1457 if (cg_write(parent, "memory.swap.max", "0")) in test_memcg_oom_group_parent_events()
1460 if (cg_write(parent, "memory.oom.group", "1")) in test_memcg_oom_group_parent_events()
1489 * This test disables swapping and tries to allocate anonymous memory
1490 * up to OOM with memory.oom.group set. Then it checks that all
1507 if (cg_write(memcg, "memory.max", "50M")) in test_memcg_oom_group_score_events()
1510 if (cg_write(memcg, "memory.swap.max", "0")) in test_memcg_oom_group_score_events()
1513 if (cg_write(memcg, "memory.oom.group", "1")) in test_memcg_oom_group_score_events()
1524 if (cg_read_key_long(memcg, "memory.events", "oom_kill ") != 3) in test_memcg_oom_group_score_events()
1571 * Check that the memory controller is available: in main()
1572 * memory is listed in cgroup.controllers in main()
1574 if (cg_read_strstr(root, "cgroup.controllers", "memory")) in main()
1575 ksft_exit_skip("memory controller isn't available\n"); in main()
1577 if (cg_read_strstr(root, "cgroup.subtree_control", "memory")) in main()
1578 if (cg_write(root, "cgroup.subtree_control", "+memory")) in main()
1579 ksft_exit_skip("Failed to set memory controller\n"); in main()
1583 ksft_exit_skip("Failed to query cgroup mount option\n"); in main()
1588 ksft_exit_skip("Failed to query cgroup mount option\n"); in main()