// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024-2025 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

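/* Mirror the kernel's errno values here; the usual errno headers are not
 * available to BPF programs built against vmlinux.h.
 */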
#define EDEADLK 35
#define ETIMEDOUT 110

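/* Array map whose values each embed a resilient spinlock; the tests below
 * acquire these per-element locks in deliberately conflicting patterns.
 */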
struct arr_elem {
	struct bpf_res_spin_lock lock;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 64);
	__type(key, int);
	__type(value, struct arr_elem);
} arrmap SEC(".maps");

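/* Global resilient spinlocks for the ABBA tests, placed in separate custom
 * sections so that each lands in its own map value (a map value may hold
 * only a single lock).
 */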
struct bpf_res_spin_lock lockA __hidden SEC(".data.A");
struct bpf_res_spin_lock lockB __hidden SEC(".data.B");

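/* AA deadlock: both lookups use key 0, so elem1 and elem2 refer to the same
 * lock. The nested acquisition must fail (with -EDEADLK); if it succeeds,
 * return -1 to flag the test as broken.
 */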
SEC("tc")
int res_spin_lock_test(struct __sk_buff *ctx)
{
	struct arr_elem *elem1, *elem2;
	int r;

	elem1 = bpf_map_lookup_elem(&arrmap, &(int){0});
	if (!elem1)
		return -1;
	elem2 = bpf_map_lookup_elem(&arrmap, &(int){0});
	if (!elem2)
		return -1;

	r = bpf_res_spin_lock(&elem1->lock);
	if (r)
		return r;
	if (!bpf_res_spin_lock(&elem2->lock)) {
		bpf_res_spin_unlock(&elem2->lock);
		bpf_res_spin_unlock(&elem1->lock);
		return -1;
	}
	bpf_res_spin_unlock(&elem1->lock);
	return 0;
}

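/* First half of an ABBA pair, meant to run concurrently with
 * res_spin_lock_test_BA below: take lockA, then try lockB. Losing the race
 * for lockA is mere contention, not a failure, hence return !r (i.e. 0).
 */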
SEC("tc")
int res_spin_lock_test_AB(struct __sk_buff *ctx)
{
	int r;

	r = bpf_res_spin_lock(&lockA);
	if (r)
		return !r;
	/* Only unlock if we took the lock. */
	if (!bpf_res_spin_lock(&lockB))
		bpf_res_spin_unlock(&lockB);
	bpf_res_spin_unlock(&lockA);
	return 0;
}

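/* Set to -EDEADLK when the BA side sees deadlock detection fire; presumably
 * read back by the userspace harness.
 */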
int err;

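/* Second half of the ABBA pair: take lockB, then try lockA. If the inner
 * acquisition fails, ABBA detection fired on this side; record -EDEADLK in
 * err for userspace to observe.
 */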
SEC("tc")
int res_spin_lock_test_BA(struct __sk_buff *ctx)
{
	int r;

	r = bpf_res_spin_lock(&lockB);
	if (r)
		return !r;
	if (!bpf_res_spin_lock(&lockA))
		bpf_res_spin_unlock(&lockA);
	else
		err = -EDEADLK;
	bpf_res_spin_unlock(&lockB);
	return err ?: 0;
}

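/* Fill the held lock table past its 31-entry capacity, then trigger an AA
 * deadlock that detection can no longer see; only the 1/4 s timeout should
 * get us out.
 */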
SEC("tc")
int res_spin_lock_test_held_lock_max(struct __sk_buff *ctx)
{
	struct bpf_res_spin_lock *locks[48] = {};
	struct arr_elem *e;
	u64 time_beg, time;
	int ret = 0, i;

	_Static_assert(ARRAY_SIZE(((struct rqspinlock_held){}).locks) == 31,
		       "RES_NR_HELD assumed to be 31");

	for (i = 0; i < 34; i++) {
		int key = i;

		/* We cannot pass in i directly, as it would get spilled/filled
		 * by the compiler and lose its bounds in the verifier state.
		 */
		e = bpf_map_lookup_elem(&arrmap, &key);
		if (!e)
			return 1;
		locks[i] = &e->lock;
	}

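	/* Fill the remaining slots with keys 32..45; locks[34] thus aliases
	 * locks[32] (both map to key 32), giving an AA pair we can trigger
	 * once the held lock table is full.
	 */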
	for (; i < 48; i++) {
		int key = i - 2;

		/* We cannot pass in i directly, as it would get spilled/filled
		 * by the compiler and lose its bounds in the verifier state.
		 */
		e = bpf_map_lookup_elem(&arrmap, &key);
		if (!e)
			return 1;
		locks[i] = &e->lock;
	}

	time_beg = bpf_ktime_get_ns();
	for (i = 0; i < 34; i++) {
		if (bpf_res_spin_lock(locks[i]))
			goto end;
	}

	/* Trigger AA after exhausting the entries in the held lock table.
	 * This time only the timeout can save us, as AA detection won't
	 * succeed.
	 */
	if (!bpf_res_spin_lock(locks[34])) {
		bpf_res_spin_unlock(locks[34]);
		ret = 1;
		goto end;
	}

end:
	for (i = i - 1; i >= 0; i--)
		bpf_res_spin_unlock(locks[i]);
	time = bpf_ktime_get_ns() - time_beg;
	/* Time spent should easily exceed our limit (1/4 s), since AA
	 * detection won't be expedited due to the lack of a held lock entry.
	 */
	return ret ?: (time > 1000000000 / 4 ? 0 : 1);
}

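/*
 * A minimal sketch (not part of this object) of how a userspace harness
 * might drive these programs, assuming a bpftool-generated skeleton
 * "res_spin_lock.skel.h" and the usual pkt_v4 test packet from
 * network_helpers.h; the real harness lives under prog_tests/:
 *
 *	LIBBPF_OPTS(bpf_test_run_opts, topts,
 *		    .data_in = &pkt_v4,
 *		    .data_size_in = sizeof(pkt_v4),
 *		    .repeat = 1);
 *	struct res_spin_lock *skel = res_spin_lock__open_and_load();
 *
 *	if (!skel)
 *		return;
 *	bpf_prog_test_run_opts(bpf_program__fd(skel->progs.res_spin_lock_test),
 *			       &topts);
 *	// topts.retval of 0 means the nested (AA) acquisition failed as expected
 *	res_spin_lock__destroy(skel);
 */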
char _license[] SEC("license") = "GPL";