// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2025 Andrea Righi <arighi@nvidia.com>
 */
#include <bpf/bpf.h>
#include <scx/common.h>
#include <sys/wait.h>
#include <unistd.h>
#include "allowed_cpus.bpf.skel.h"
#include "scx_test.h"

static enum scx_test_status setup(void **ctx)
{
	struct allowed_cpus *skel;

	skel = allowed_cpus__open();
	SCX_FAIL_IF(!skel, "Failed to open");
	SCX_ENUM_INIT(skel);
	SCX_FAIL_IF(allowed_cpus__load(skel), "Failed to load skel");

	*ctx = skel;

	return SCX_TEST_PASS;
}

/*
 * Run the select_cpu_from_user syscall program via BPF_PROG_TEST_RUN,
 * passing the current PID as context, and report the idle CPU it picks.
 */
static int test_select_cpu_from_user(const struct allowed_cpus *skel)
{
	int fd, ret;
	__u64 args[1];

	LIBBPF_OPTS(bpf_test_run_opts, attr,
		.ctx_in = args,
		.ctx_size_in = sizeof(args),
	);

	args[0] = getpid();
	fd = bpf_program__fd(skel->progs.select_cpu_from_user);
	if (fd < 0)
		return fd;

	ret = bpf_prog_test_run_opts(fd, &attr);
	if (ret < 0)
		return ret;

	fprintf(stderr, "%s: CPU %d\n", __func__, attr.retval);

	return 0;
}

static enum scx_test_status run(void *ctx)
{
	struct allowed_cpus *skel = ctx;
	struct bpf_link *link;

	link = bpf_map__attach_struct_ops(skel->maps.allowed_cpus_ops);
	SCX_FAIL_IF(!link, "Failed to attach scheduler");

	/* Pick an idle CPU from user-space */
	SCX_FAIL_IF(test_select_cpu_from_user(skel), "Failed to pick idle CPU");

	/* Just sleeping is fine, plenty of scheduling events happening */
	sleep(1);

	SCX_EQ(skel->data->uei.kind, EXIT_KIND(SCX_EXIT_NONE));
	bpf_link__destroy(link);

	return SCX_TEST_PASS;
}

static void cleanup(void *ctx)
{
	struct allowed_cpus *skel = ctx;

	allowed_cpus__destroy(skel);
}

struct scx_test allowed_cpus = {
	.name = "allowed_cpus",
	.description = "Verify scx_bpf_select_cpu_and()",
	.setup = setup,
	.run = run,
	.cleanup = cleanup,
};
REGISTER_SCX_TEST(&allowed_cpus)