// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
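/*
 * Stress test for the resilient queued spinlock (rqspinlock): worker
 * kthreads hold a lock in task context while perf sampling NMIs on the
 * same CPU grab either the same lock (AA) or the opposite lock (ABBA),
 * exercising rqspinlock's deadlock/timeout recovery paths. The test
 * starts on module load and pauses itself after 5 seconds, e.g.:
 *
 *	insmod bpf_test_rqspinlock.ko test_ab=1
 */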
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/prandom.h>
#include <asm/rqspinlock.h>
#include <linux/perf_event.h>
#include <linux/kthread.h>
#include <linux/atomic.h>
#include <linux/slab.h>

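/*
 * Pinned, initially-disabled hardware CPU-cycles counter; every 100000
 * cycles the overflow handler (nmi_cb below) fires, typically in NMI
 * context. One such event is created per non-boot CPU at init time.
 */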
static struct perf_event_attr hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
	.sample_period	= 100000,
};

static rqspinlock_t lock_a;
static rqspinlock_t lock_b;

static struct perf_event **rqsl_evts;
static int rqsl_nevts;

static bool test_ab = false;
module_param(test_ab, bool, 0644);
MODULE_PARM_DESC(test_ab, "Test ABBA situations instead of AA situations");

static struct task_struct **rqsl_threads;
static int rqsl_nthreads;
static atomic_t rqsl_ready_cpus = ATOMIC_INIT(0);

/* Set by the CPU 0 controller thread to stop the lock/NMI churn. */
static int pause;

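/*
 * In ABBA mode, odd CPUs invert the lock order: they take lock_b in task
 * context and lock_a from the NMI handler, while even CPUs do the opposite.
 */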
static bool nmi_locks_a(int cpu)
{
	return (cpu & 1) && test_ab;
}

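/*
 * Per-CPU worker. CPUs other than 0 repeatedly take their task-context
 * lock, hold it for 20 ms (long enough for sampling NMIs to land while
 * the lock is held), and release it only if the acquisition succeeded;
 * rqspinlock returns an error instead of spinning forever on deadlock.
 * CPU 0 acts as the controller (see the loop at the bottom).
 */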
static int rqspinlock_worker_fn(void *arg)
{
	int cpu = smp_processor_id();
	unsigned long flags;
	int ret;

	if (cpu) {
		atomic_inc(&rqsl_ready_cpus);

		while (!kthread_should_stop()) {
			if (READ_ONCE(pause)) {
				msleep(1000);
				continue;
			}
			if (nmi_locks_a(cpu))
				ret = raw_res_spin_lock_irqsave(&lock_b, flags);
			else
				ret = raw_res_spin_lock_irqsave(&lock_a, flags);
			mdelay(20);
			if (nmi_locks_a(cpu) && !ret)
				raw_res_spin_unlock_irqrestore(&lock_b, flags);
			else if (!ret)
				raw_res_spin_unlock_irqrestore(&lock_a, flags);
			cpu_relax();
		}
		return 0;
	}

	/* CPU 0: wait for every worker, enable sampling, run 5 s, then pause. */
	while (!kthread_should_stop()) {
		int expected = rqsl_nthreads > 0 ? rqsl_nthreads - 1 : 0;
		int ready = atomic_read(&rqsl_ready_cpus);

		if (ready == expected && !READ_ONCE(pause)) {
			for (int i = 0; i < rqsl_nevts; i++)
				perf_event_enable(rqsl_evts[i]);
			pr_err("Waiting 5 secs to pause the test\n");
			msleep(1000 * 5);
			WRITE_ONCE(pause, 1);
			pr_err("Paused the test\n");
		} else {
			msleep(1000);
			cpu_relax();
		}
	}
	return 0;
}

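/*
 * Overflow handler, invoked in NMI context on counter overflow. It takes
 * the lock the interrupted task context may already hold (AA mode), or the
 * opposite lock (ABBA mode), forcing the rqspinlock recovery path, and
 * unlocks only if the acquisition actually succeeded.
 */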
static void nmi_cb(struct perf_event *event, struct perf_sample_data *data,
		   struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	unsigned long flags;
	int ret;

	if (!cpu || READ_ONCE(pause))
		return;

	if (nmi_locks_a(cpu))
		ret = raw_res_spin_lock_irqsave(&lock_a, flags);
	else
		ret = raw_res_spin_lock_irqsave(test_ab ? &lock_b : &lock_a, flags);

	mdelay(10);

	if (nmi_locks_a(cpu) && !ret)
		raw_res_spin_unlock_irqrestore(&lock_a, flags);
	else if (!ret)
		raw_res_spin_unlock_irqrestore(test_ab ? &lock_b : &lock_a, flags);
}

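/* Stop and reap all worker kthreads created at init time. */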
static void free_rqsl_threads(void)
{
	int i;

	if (rqsl_threads) {
		for_each_online_cpu(i) {
			if (rqsl_threads[i])
				kthread_stop(rqsl_threads[i]);
		}
		kfree(rqsl_threads);
	}
}

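/* Release the per-CPU perf events backing the NMI callbacks. */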
static void free_rqsl_evts(void)
{
	int i;

	if (rqsl_evts) {
		for (i = 0; i < rqsl_nevts; i++) {
			if (rqsl_evts[i])
				perf_event_release_kernel(rqsl_evts[i]);
		}
		kfree(rqsl_evts);
	}
}

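/*
 * Set up one cycles counter per non-boot CPU and one worker kthread per
 * online CPU (the arrays assume online CPU ids are contiguous starting
 * at 0), then kick everything off. Unwinds fully on any failure.
 */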
static int bpf_test_rqspinlock_init(void)
{
	int i, ret;
	int ncpus = num_online_cpus();

	pr_err("Mode = %s\n", test_ab ? "ABBA" : "AA");

	if (ncpus < 3)
		return -ENOTSUPP;

	raw_res_spin_lock_init(&lock_a);
	raw_res_spin_lock_init(&lock_b);

	rqsl_evts = kcalloc(ncpus - 1, sizeof(*rqsl_evts), GFP_KERNEL);
	if (!rqsl_evts)
		return -ENOMEM;
	rqsl_nevts = ncpus - 1;

	for (i = 1; i < ncpus; i++) {
		struct perf_event *e;

		e = perf_event_create_kernel_counter(&hw_attr, i, NULL, nmi_cb, NULL);
		if (IS_ERR(e)) {
			ret = PTR_ERR(e);
			goto err_perf_events;
		}
		rqsl_evts[i - 1] = e;
	}

	rqsl_threads = kcalloc(ncpus, sizeof(*rqsl_threads), GFP_KERNEL);
	if (!rqsl_threads) {
		ret = -ENOMEM;
		goto err_perf_events;
	}
	rqsl_nthreads = ncpus;

	for_each_online_cpu(i) {
		struct task_struct *t;

		t = kthread_create(rqspinlock_worker_fn, NULL, "rqsl_w/%d", i);
		if (IS_ERR(t)) {
			ret = PTR_ERR(t);
			goto err_threads_create;
		}
		kthread_bind(t, i);
		rqsl_threads[i] = t;
		wake_up_process(t);
	}
	return 0;

err_threads_create:
	free_rqsl_threads();
err_perf_events:
	free_rqsl_evts();
	return ret;
}

module_init(bpf_test_rqspinlock_init);

static void bpf_test_rqspinlock_exit(void)
{
	free_rqsl_threads();
	free_rqsl_evts();
}

module_exit(bpf_test_rqspinlock_exit);

MODULE_AUTHOR("Kumar Kartikeya Dwivedi");
MODULE_DESCRIPTION("BPF rqspinlock stress test module");
MODULE_LICENSE("GPL");