/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Stress concurrent SCX_KICK_WAIT calls to reproduce wait-cycle deadlock.
 *
 * Three CPUs are designated from userspace. Every enqueue from one of the
 * three CPUs kicks the next CPU in the ring with SCX_KICK_WAIT, creating a
 * persistent A -> B -> C -> A wait cycle pressure.
 */
9*090d34f0STejun Heo #include <scx/common.bpf.h>
10*090d34f0STejun Heo
char _license[] SEC("license") = "GPL";

/* The three CPUs forming the kick ring; set by userspace before load. */
const volatile s32 test_cpu_a;
const volatile s32 test_cpu_b;
const volatile s32 test_cpu_c;

/* Stats read by userspace: total enqueues and SCX_KICK_WAIT kicks issued. */
u64 nr_enqueues;
u64 nr_wait_kicks;

/* Exit info (reason/message) recorded on scheduler exit for userspace. */
UEI_DEFINE(uei);
21*090d34f0STejun Heo
/*
 * Map @cpu to its successor in the A -> B -> C -> A kick ring.
 * Returns -1 when @cpu is not one of the three designated CPUs.
 */
static s32 target_cpu(s32 cpu)
{
	if (cpu == test_cpu_a)
		return test_cpu_b;
	if (cpu == test_cpu_b)
		return test_cpu_c;

	return cpu == test_cpu_c ? test_cpu_a : -1;
}
32*090d34f0STejun Heo
/*
 * Enqueue path. Counts every enqueue, dispatches the task, and — when the
 * enqueueing CPU is part of the designated ring — kicks the next CPU in
 * the ring with SCX_KICK_WAIT to keep the wait cycle under pressure.
 */
void BPF_STRUCT_OPS(cyclic_kick_wait_enqueue, struct task_struct *p,
		    u64 enq_flags)
{
	s32 self = bpf_get_smp_processor_id();
	s32 next;

	__sync_fetch_and_add(&nr_enqueues, 1);

	/*
	 * Kernel threads go straight to the local DSQ with an infinite slice
	 * and preemption — presumably so per-CPU kthreads keep making forward
	 * progress under the stress load (NOTE(review): confirm intent).
	 */
	if (p->flags & PF_KTHREAD) {
		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_INF,
				   enq_flags | SCX_ENQ_PREEMPT);
		return;
	}

	/* Everything else shares the global DSQ with the default slice. */
	scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);

	next = target_cpu(self);
	if (next >= 0 && next != self) {
		__sync_fetch_and_add(&nr_wait_kicks, 1);
		scx_bpf_kick_cpu(next, SCX_KICK_WAIT);
	}
}
56*090d34f0STejun Heo
/* Record the exit reason/message into @uei so userspace can report it. */
void BPF_STRUCT_OPS(cyclic_kick_wait_exit, struct scx_exit_info *ei)
{
	UEI_RECORD(uei, ei);
}
61*090d34f0STejun Heo
62*090d34f0STejun Heo SEC(".struct_ops.link")
63*090d34f0STejun Heo struct sched_ext_ops cyclic_kick_wait_ops = {
64*090d34f0STejun Heo .enqueue = cyclic_kick_wait_enqueue,
65*090d34f0STejun Heo .exit = cyclic_kick_wait_exit,
66*090d34f0STejun Heo .name = "cyclic_kick_wait",
67*090d34f0STejun Heo .timeout_ms = 1000U,
68*090d34f0STejun Heo };
69