xref: /linux/tools/testing/selftests/bpf/test_kmods/bpf_test_rqspinlock.c (revision 4f38da1f027ea2c9f01bb71daa7a299c191b6940)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/prandom.h>
#include <asm/rqspinlock.h>
#include <linux/perf_event.h>
#include <linux/kthread.h>
#include <linux/atomic.h>
#include <linux/slab.h>

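/*
 * Pinned, initially-disabled hardware cycles counter, sampling every 100000
 * cycles. Its overflow handler (nmi_cb below) fires from the PMU interrupt
 * (an NMI on most architectures), which is what lets the test interrupt a
 * worker while it holds a lock with IRQs disabled.
 */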
static struct perf_event_attr hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
	.sample_period	= 100000,
};

static rqspinlock_t lock_a;
static rqspinlock_t lock_b;

static struct perf_event **rqsl_evts;
static int rqsl_nevts;

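/*
 * Example usage (module name assumed from this file's name):
 *   insmod bpf_test_rqspinlock.ko            # AA deadlock test
 *   insmod bpf_test_rqspinlock.ko test_ab=1  # ABBA deadlock test
 */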
static bool test_ab = false;
module_param(test_ab, bool, 0644);
MODULE_PARM_DESC(test_ab, "Test ABBA situations instead of AA situations");

static struct task_struct **rqsl_threads;
static int rqsl_nthreads;
static atomic_t rqsl_ready_cpus = ATOMIC_INIT(0);

static int pause = 0;

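/*
 * Returns true when this CPU's NMI handler should contend on lock_a while its
 * worker holds lock_b. Only odd CPUs in ABBA mode (test_ab) do this; even
 * CPUs take the two locks in the opposite order, giving a cross-context ABBA
 * dependency. In AA mode this is always false and both the worker and the
 * NMI handler use lock_a.
 */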
static bool nmi_locks_a(int cpu)
{
	return (cpu & 1) && test_ab;
}

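/*
 * Per-CPU worker thread. CPU 0 acts as the coordinator: once every other
 * worker has checked in via rqsl_ready_cpus it enables the perf events, lets
 * the test run for 5 seconds, and then sets 'pause'. All other CPUs
 * repeatedly take their lock with IRQs disabled and hold it for 20ms, giving
 * the NMI callback a wide window to interrupt the critical section.
 */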
static int rqspinlock_worker_fn(void *arg)
{
	int cpu = smp_processor_id();
	unsigned long flags;
	int ret;

	if (cpu) {
		atomic_inc(&rqsl_ready_cpus);

		while (!kthread_should_stop()) {
			if (READ_ONCE(pause)) {
				msleep(1000);
				continue;
			}
			if (nmi_locks_a(cpu))
				ret = raw_res_spin_lock_irqsave(&lock_b, flags);
			else
				ret = raw_res_spin_lock_irqsave(&lock_a, flags);
			mdelay(20);
			if (nmi_locks_a(cpu) && !ret)
				raw_res_spin_unlock_irqrestore(&lock_b, flags);
			else if (!ret)
				raw_res_spin_unlock_irqrestore(&lock_a, flags);
			cpu_relax();
		}
		return 0;
	}

	while (!kthread_should_stop()) {
		int expected = rqsl_nthreads > 0 ? rqsl_nthreads - 1 : 0;
		int ready = atomic_read(&rqsl_ready_cpus);

		if (ready == expected && !READ_ONCE(pause)) {
			for (int i = 0; i < rqsl_nevts; i++)
				perf_event_enable(rqsl_evts[i]);
			pr_err("Waiting 5 secs to pause the test\n");
			msleep(1000 * 5);
			WRITE_ONCE(pause, 1);
			pr_err("Paused the test\n");
		} else {
			msleep(1000);
			cpu_relax();
		}
	}
	return 0;
}

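/*
 * Perf overflow callback, invoked from the PMU interrupt. It takes the lock
 * chosen by nmi_locks_a() on top of whatever the interrupted worker already
 * holds, producing either an AA or an ABBA deadlock that rqspinlock is
 * expected to detect and resolve by returning an error; the unlock is only
 * performed when the acquisition actually succeeded (ret == 0).
 */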
static void nmi_cb(struct perf_event *event, struct perf_sample_data *data,
		   struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	unsigned long flags;
	int ret;

	if (!cpu || READ_ONCE(pause))
		return;

	if (nmi_locks_a(cpu))
		ret = raw_res_spin_lock_irqsave(&lock_a, flags);
	else
		ret = raw_res_spin_lock_irqsave(test_ab ? &lock_b : &lock_a, flags);

	mdelay(10);

	if (nmi_locks_a(cpu) && !ret)
		raw_res_spin_unlock_irqrestore(&lock_a, flags);
	else if (!ret)
		raw_res_spin_unlock_irqrestore(test_ab ? &lock_b : &lock_a, flags);
}

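/* Stop and free every worker thread that was successfully created. */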
static void free_rqsl_threads(void)
{
	int i;

	if (rqsl_threads) {
		for_each_online_cpu(i) {
			if (rqsl_threads[i])
				kthread_stop(rqsl_threads[i]);
		}
		kfree(rqsl_threads);
	}
}

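/* Release every perf event that was successfully created. */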
static void free_rqsl_evts(void)
{
	int i;

	if (rqsl_evts) {
		for (i = 0; i < rqsl_nevts; i++) {
			if (rqsl_evts[i])
				perf_event_release_kernel(rqsl_evts[i]);
		}
		kfree(rqsl_evts);
	}
}

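/*
 * Module init: requires at least three online CPUs (presumably one
 * coordinator plus at least two contending CPUs), creates one cycles counter
 * on each CPU except CPU 0 to drive nmi_cb, and binds one worker thread to
 * every online CPU. On failure, anything created so far is torn down.
 */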
static int bpf_test_rqspinlock_init(void)
{
	int i, ret;
	int ncpus = num_online_cpus();

	pr_err("Mode = %s\n", test_ab ? "ABBA" : "AA");

	if (ncpus < 3)
		return -ENOTSUPP;

	raw_res_spin_lock_init(&lock_a);
	raw_res_spin_lock_init(&lock_b);

	rqsl_evts = kcalloc(ncpus - 1, sizeof(*rqsl_evts), GFP_KERNEL);
	if (!rqsl_evts)
		return -ENOMEM;
	rqsl_nevts = ncpus - 1;

	for (i = 1; i < ncpus; i++) {
		struct perf_event *e;

		e = perf_event_create_kernel_counter(&hw_attr, i, NULL, nmi_cb, NULL);
		if (IS_ERR(e)) {
			ret = PTR_ERR(e);
			goto err_perf_events;
		}
		rqsl_evts[i - 1] = e;
	}

	rqsl_threads = kcalloc(ncpus, sizeof(*rqsl_threads), GFP_KERNEL);
	if (!rqsl_threads) {
		ret = -ENOMEM;
		goto err_perf_events;
	}
	rqsl_nthreads = ncpus;

	for_each_online_cpu(i) {
		struct task_struct *t;

		t = kthread_create(rqspinlock_worker_fn, NULL, "rqsl_w/%d", i);
		if (IS_ERR(t)) {
			ret = PTR_ERR(t);
			goto err_threads_create;
		}
		kthread_bind(t, i);
		rqsl_threads[i] = t;
		wake_up_process(t);
	}
	return 0;

err_threads_create:
	free_rqsl_threads();
err_perf_events:
	free_rqsl_evts();
	return ret;
}

module_init(bpf_test_rqspinlock_init);

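/* Module exit: stop the workers first, then release the perf events. */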
static void bpf_test_rqspinlock_exit(void)
{
	free_rqsl_threads();
	free_rqsl_evts();
}

module_exit(bpf_test_rqspinlock_exit);

MODULE_AUTHOR("Kumar Kartikeya Dwivedi");
MODULE_DESCRIPTION("BPF rqspinlock stress test module");
MODULE_LICENSE("GPL");