// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024-2025 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

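/* Local copies of the errno values used below; they match the kernel's
 * asm-generic errno definitions. System errno headers are typically not
 * usable alongside vmlinux.h.
 */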
#define EDEADLK 35
#define ETIMEDOUT 110

struct arr_elem {
	struct bpf_res_spin_lock lock;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 64);
	__type(key, int);
	__type(value, struct arr_elem);
} arrmap SEC(".maps");

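/* Two global resilient spin locks, each placed in its own data section; the
 * AB and BA programs below take them in opposite orders to exercise ABBA
 * deadlock detection.
 */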
struct bpf_res_spin_lock lockA __hidden SEC(".data.A");
struct bpf_res_spin_lock lockB __hidden SEC(".data.B");

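/* AA deadlock test: acquire the same map element's lock twice. The second
 * acquisition must fail, and the test passes (returns 0) only if it fails
 * with -EDEADLK.
 */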
SEC("tc")
int res_spin_lock_test(struct __sk_buff *ctx)
{
	struct arr_elem *elem1, *elem2;
	int r;

	elem1 = bpf_map_lookup_elem(&arrmap, &(int){0});
	if (!elem1)
		return -1;
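	/* Look up key 0 again: elem2 refers to the same lock as elem1, but
	 * through a distinct pointer, allowing the AA attempt below.
	 */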
	elem2 = bpf_map_lookup_elem(&arrmap, &(int){0});
	if (!elem2)
		return -1;

	r = bpf_res_spin_lock(&elem1->lock);
	if (r)
		return r;
	r = bpf_res_spin_lock(&elem2->lock);
	if (!r) {
		bpf_res_spin_unlock(&elem2->lock);
		bpf_res_spin_unlock(&elem1->lock);
		return -1;
	}
	bpf_res_spin_unlock(&elem1->lock);
	return r != -EDEADLK;
}

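/* First half of the ABBA pair: take lockA, then lockB. Presumably run
 * concurrently with res_spin_lock_test_BA below by the userspace harness to
 * set up the ABBA ordering.
 */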
SEC("tc")
int res_spin_lock_test_AB(struct __sk_buff *ctx)
{
	int r;

	r = bpf_res_spin_lock(&lockA);
	if (r)
		return !r;
	/* Only unlock if we took the lock. */
	if (!bpf_res_spin_lock(&lockB))
		bpf_res_spin_unlock(&lockB);
	bpf_res_spin_unlock(&lockA);
	return 0;
}

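/* Records the deadlock error observed by res_spin_lock_test_BA; presumably
 * read back by the userspace harness.
 */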
int err;

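/* Second half of the ABBA pair: take lockB, then lockA, the opposite order
 * to res_spin_lock_test_AB above.
 */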
68 SEC("tc")
res_spin_lock_test_BA(struct __sk_buff * ctx)69 int res_spin_lock_test_BA(struct __sk_buff *ctx)
70 {
71 	int r;
72 
73 	r = bpf_res_spin_lock(&lockB);
74 	if (r)
75 		return !r;
76 	if (!bpf_res_spin_lock(&lockA))
77 		bpf_res_spin_unlock(&lockA);
78 	else
79 		err = -EDEADLK;
80 	bpf_res_spin_unlock(&lockB);
81 	return err ?: 0;
82 }
83 
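/* Exhaust the held lock table (RES_NR_HELD == 31 entries), then trigger an
 * AA deadlock on a lock that no longer has a table entry: AA detection
 * cannot see it, so only the timeout can resolve the situation.
 */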
SEC("tc")
int res_spin_lock_test_held_lock_max(struct __sk_buff *ctx)
{
	struct bpf_res_spin_lock *locks[48] = {};
	struct arr_elem *e;
	u64 time_beg, time;
	int ret = 0, i;

	_Static_assert(ARRAY_SIZE(((struct rqspinlock_held){}).locks) == 31,
		       "RES_NR_HELD assumed to be 31");

	for (i = 0; i < 34; i++) {
		int key = i;

		/* We cannot pass in i directly, as it will get spilled/filled
		 * by the compiler and lose its bounds in verifier state.
		 */
		e = bpf_map_lookup_elem(&arrmap, &key);
		if (!e)
			return 1;
		locks[i] = &e->lock;
	}

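	/* Fill locks[34..47] with locks already captured above (key == i - 2),
	 * so locks[34] aliases the already-held lock at key 32.
	 */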
	for (; i < 48; i++) {
		int key = i - 2;

		/* We cannot pass in i directly, as it will get spilled/filled
		 * by the compiler and lose its bounds in verifier state.
		 */
		e = bpf_map_lookup_elem(&arrmap, &key);
		if (!e)
			return 1;
		locks[i] = &e->lock;
	}

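	/* Acquire the first 34 locks; only 31 of them fit in the held lock
	 * table.
	 */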
	time_beg = bpf_ktime_get_ns();
	for (i = 0; i < 34; i++) {
		if (bpf_res_spin_lock(locks[i]))
			goto end;
	}

	/* Trigger AA, after exhausting entries in the held lock table. This
	 * time, only the timeout can save us, as AA detection won't succeed.
	 */
	ret = bpf_res_spin_lock(locks[34]);
	if (!ret) {
		bpf_res_spin_unlock(locks[34]);
		ret = 1;
		goto end;
	}

	ret = ret != -ETIMEDOUT ? 2 : 0;

end:
	for (i = i - 1; i >= 0; i--)
		bpf_res_spin_unlock(locks[i]);
	time = bpf_ktime_get_ns() - time_beg;
	/* Time spent should be easily above our limit (1/4 s), since AA
	 * detection won't be expedited due to the lack of a held lock entry.
	 */
	return ret ?: (time > 1000000000 / 4 ? 0 : 1);
}

char _license[] SEC("license") = "GPL";