1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Landlock tests - Enforcing the same restrictions across multiple threads
4 *
5 * Copyright © 2025 Günther Noack <gnoack3000@gmail.com>
6 */
7
8 #define _GNU_SOURCE
9 #include <linux/landlock.h>
10 #include <pthread.h>
11 #include <signal.h>
12 #include <sys/prctl.h>
13
14 #include "common.h"
15
16 /* create_ruleset - Create a simple ruleset FD common to all tests */
create_ruleset(struct __test_metadata * const _metadata)17 static int create_ruleset(struct __test_metadata *const _metadata)
18 {
19 struct landlock_ruleset_attr ruleset_attr = {
20 .handled_access_fs = (LANDLOCK_ACCESS_FS_WRITE_FILE |
21 LANDLOCK_ACCESS_FS_TRUNCATE),
22 };
23 const int ruleset_fd =
24 landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
25
26 ASSERT_LE(0, ruleset_fd)
27 {
28 TH_LOG("landlock_create_ruleset: %s", strerror(errno));
29 }
30 return ruleset_fd;
31 }
32
TEST(single_threaded_success)33 TEST(single_threaded_success)
34 {
35 const int ruleset_fd = create_ruleset(_metadata);
36
37 disable_caps(_metadata);
38
39 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0));
40 ASSERT_EQ(0, landlock_restrict_self(ruleset_fd,
41 LANDLOCK_RESTRICT_SELF_TSYNC));
42
43 EXPECT_EQ(0, close(ruleset_fd));
44 }
45
/*
 * store_no_new_privs - Thread cleanup handler recording no_new_privs
 *
 * Stores the calling thread's PR_GET_NO_NEW_PRIVS state in the bool
 * pointed to by @data, if @data is non-NULL.
 */
static void store_no_new_privs(void *data)
{
	bool *const flag = data;

	if (flag)
		*flag = prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0);
}
54
idle(void * data)55 static void *idle(void *data)
56 {
57 pthread_cleanup_push(store_no_new_privs, data);
58
59 while (true)
60 sleep(1);
61
62 pthread_cleanup_pop(1);
63 }
64
TEST(multi_threaded_success)65 TEST(multi_threaded_success)
66 {
67 pthread_t t1, t2;
68 bool no_new_privs1, no_new_privs2;
69 const int ruleset_fd = create_ruleset(_metadata);
70
71 disable_caps(_metadata);
72
73 ASSERT_EQ(0, pthread_create(&t1, NULL, idle, &no_new_privs1));
74 ASSERT_EQ(0, pthread_create(&t2, NULL, idle, &no_new_privs2));
75
76 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0));
77
78 EXPECT_EQ(0, landlock_restrict_self(ruleset_fd,
79 LANDLOCK_RESTRICT_SELF_TSYNC));
80
81 ASSERT_EQ(0, pthread_cancel(t1));
82 ASSERT_EQ(0, pthread_cancel(t2));
83 ASSERT_EQ(0, pthread_join(t1, NULL));
84 ASSERT_EQ(0, pthread_join(t2, NULL));
85
86 /* The no_new_privs flag was implicitly enabled on all threads. */
87 EXPECT_TRUE(no_new_privs1);
88 EXPECT_TRUE(no_new_privs2);
89
90 EXPECT_EQ(0, close(ruleset_fd));
91 }
92
TEST(multi_threaded_success_despite_diverging_domains)93 TEST(multi_threaded_success_despite_diverging_domains)
94 {
95 pthread_t t1, t2;
96 const int ruleset_fd = create_ruleset(_metadata);
97
98 disable_caps(_metadata);
99
100 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0));
101
102 ASSERT_EQ(0, pthread_create(&t1, NULL, idle, NULL));
103 ASSERT_EQ(0, pthread_create(&t2, NULL, idle, NULL));
104
105 /*
106 * The main thread enforces a ruleset,
107 * thereby bringing the threads' Landlock domains out of sync.
108 */
109 EXPECT_EQ(0, landlock_restrict_self(ruleset_fd, 0));
110
111 /* Still, TSYNC succeeds, bringing the threads in sync again. */
112 EXPECT_EQ(0, landlock_restrict_self(ruleset_fd,
113 LANDLOCK_RESTRICT_SELF_TSYNC));
114
115 ASSERT_EQ(0, pthread_cancel(t1));
116 ASSERT_EQ(0, pthread_cancel(t2));
117 ASSERT_EQ(0, pthread_join(t1, NULL));
118 ASSERT_EQ(0, pthread_join(t2, NULL));
119 EXPECT_EQ(0, close(ruleset_fd));
120 }
121
/* Per-thread argument and result storage for thread_restrict(). */
struct thread_restrict_data {
	pthread_t t; /* filled in by pthread_create() in the test */
	int ruleset_fd; /* ruleset to enforce, set by the test */
	int result; /* landlock_restrict_self() return value */
};
127
thread_restrict(void * data)128 static void *thread_restrict(void *data)
129 {
130 struct thread_restrict_data *d = data;
131
132 d->result = landlock_restrict_self(d->ruleset_fd,
133 LANDLOCK_RESTRICT_SELF_TSYNC);
134 return NULL;
135 }
136
TEST(competing_enablement)137 TEST(competing_enablement)
138 {
139 const int ruleset_fd = create_ruleset(_metadata);
140 struct thread_restrict_data d[] = {
141 { .ruleset_fd = ruleset_fd },
142 { .ruleset_fd = ruleset_fd },
143 };
144
145 disable_caps(_metadata);
146
147 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0));
148 ASSERT_EQ(0, pthread_create(&d[0].t, NULL, thread_restrict, &d[0]));
149 ASSERT_EQ(0, pthread_create(&d[1].t, NULL, thread_restrict, &d[1]));
150
151 /* Wait for threads to finish. */
152 ASSERT_EQ(0, pthread_join(d[0].t, NULL));
153 ASSERT_EQ(0, pthread_join(d[1].t, NULL));
154
155 /* Expect that both succeeded. */
156 EXPECT_EQ(0, d[0].result);
157 EXPECT_EQ(0, d[1].result);
158
159 EXPECT_EQ(0, close(ruleset_fd));
160 }
161
/*
 * Do-nothing SIGUSR1 handler: installing it lets the signal interrupt
 * blocking syscalls without terminating the process.
 */
static void signal_nop_handler(int sig)
{
}
165
/* Shared state between the test and its signaler thread. */
struct signaler_data {
	pthread_t target; /* thread to bombard with SIGUSR1 */
	volatile bool stop; /* set by the test to end the signal loop */
};
170
signaler_thread(void * data)171 static void *signaler_thread(void *data)
172 {
173 struct signaler_data *sd = data;
174
175 while (!sd->stop)
176 pthread_kill(sd->target, SIGUSR1);
177
178 return NULL;
179 }
180
181 /*
182 * Number of idle sibling threads. This must be large enough that even on
183 * machines with many cores, the sibling threads cannot all complete their
184 * credential preparation in a single parallel wave, otherwise the signaler
185 * thread has no window to interrupt wait_for_completion_interruptible().
186 * 200 threads on a 64-core machine yields ~3 serialized waves, giving the
187 * tight signal loop enough time to land an interruption.
188 */
189 #define NUM_IDLE_THREADS 200
190
191 /*
192 * Exercises the tsync interruption and cancellation paths in tsync.c.
193 *
194 * When a signal interrupts the calling thread while it waits for sibling
195 * threads to finish their credential preparation
196 * (wait_for_completion_interruptible in landlock_restrict_sibling_threads),
197 * the kernel sets ERESTARTNOINTR, cancels queued task works that have not
198 * started yet (cancel_tsync_works), then waits for the remaining works to
199 * finish. On the error return, syscalls.c aborts the prepared credentials.
200 * The kernel automatically restarts the syscall, so userspace sees success.
201 */
TEST(tsync_interrupt)202 TEST(tsync_interrupt)
203 {
204 size_t i;
205 pthread_t threads[NUM_IDLE_THREADS];
206 pthread_t signaler;
207 struct signaler_data sd;
208 struct sigaction sa = {};
209 const int ruleset_fd = create_ruleset(_metadata);
210
211 disable_caps(_metadata);
212
213 /* Install a no-op SIGUSR1 handler so the signal does not kill us. */
214 sa.sa_handler = signal_nop_handler;
215 sigemptyset(&sa.sa_mask);
216 ASSERT_EQ(0, sigaction(SIGUSR1, &sa, NULL));
217
218 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0));
219
220 for (i = 0; i < NUM_IDLE_THREADS; i++)
221 ASSERT_EQ(0, pthread_create(&threads[i], NULL, idle, NULL));
222
223 /*
224 * Start a signaler thread that continuously sends SIGUSR1 to the
225 * calling thread. This maximizes the chance of interrupting
226 * wait_for_completion_interruptible() in the kernel's tsync path.
227 */
228 sd.target = pthread_self();
229 sd.stop = false;
230 ASSERT_EQ(0, pthread_create(&signaler, NULL, signaler_thread, &sd));
231
232 /*
233 * The syscall may be interrupted and transparently restarted by the
234 * kernel (ERESTARTNOINTR). From userspace, it should always succeed.
235 */
236 EXPECT_EQ(0, landlock_restrict_self(ruleset_fd,
237 LANDLOCK_RESTRICT_SELF_TSYNC));
238
239 sd.stop = true;
240 ASSERT_EQ(0, pthread_join(signaler, NULL));
241
242 for (i = 0; i < NUM_IDLE_THREADS; i++) {
243 ASSERT_EQ(0, pthread_cancel(threads[i]));
244 ASSERT_EQ(0, pthread_join(threads[i], NULL));
245 }
246
247 EXPECT_EQ(0, close(ruleset_fd));
248 }
249
/* Stateless fixture: each variant only differs in its restrict-self flags. */
/* clang-format off */
FIXTURE(tsync_without_ruleset) {};
/* clang-format on */
253
FIXTURE_VARIANT(tsync_without_ruleset)
{
	const __u32 flags; /* flags passed to landlock_restrict_self() */
	const int expected_errno; /* 0 when the call is expected to succeed */
};
259
/* TSYNC alone still requires a valid ruleset FD: -1 must yield EBADF. */
/* clang-format off */
FIXTURE_VARIANT_ADD(tsync_without_ruleset, tsync_only) {
	/* clang-format on */
	.flags = LANDLOCK_RESTRICT_SELF_TSYNC,
	.expected_errno = EBADF,
};

/* LOG_SAME_EXEC_OFF in the mix keeps requiring a valid ruleset FD. */
/* clang-format off */
FIXTURE_VARIANT_ADD(tsync_without_ruleset, subdomains_off_same_exec_off) {
	/* clang-format on */
	.flags = LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF |
		 LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF |
		 LANDLOCK_RESTRICT_SELF_TSYNC,
	.expected_errno = EBADF,
};

/* LOG_NEW_EXEC_ON in the mix keeps requiring a valid ruleset FD. */
/* clang-format off */
FIXTURE_VARIANT_ADD(tsync_without_ruleset, subdomains_off_new_exec_on) {
	/* clang-format on */
	.flags = LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF |
		 LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON |
		 LANDLOCK_RESTRICT_SELF_TSYNC,
	.expected_errno = EBADF,
};

/* All restrict-self flags combined: still EBADF without a ruleset FD. */
/* clang-format off */
FIXTURE_VARIANT_ADD(tsync_without_ruleset, all_flags) {
	/* clang-format on */
	.flags = LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF |
		 LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON |
		 LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF |
		 LANDLOCK_RESTRICT_SELF_TSYNC,
	.expected_errno = EBADF,
};

/*
 * LOG_SUBDOMAINS_OFF (plus TSYNC) is expected to work without a ruleset FD
 * of -1 failing — presumably because LOG_SUBDOMAINS_OFF is usable on its own
 * without a ruleset; NOTE(review): confirm against the Landlock UAPI docs.
 */
/* clang-format off */
FIXTURE_VARIANT_ADD(tsync_without_ruleset, subdomains_off) {
	/* clang-format on */
	.flags = LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF |
		 LANDLOCK_RESTRICT_SELF_TSYNC,
	.expected_errno = 0,
};
302
FIXTURE_SETUP(tsync_without_ruleset)
{
	/* Run unprivileged, as in the other tests. */
	disable_caps(_metadata);
}
307
/* Nothing to clean up: the fixture holds no state. */
FIXTURE_TEARDOWN(tsync_without_ruleset)
{
}
311
TEST_F(tsync_without_ruleset,check)312 TEST_F(tsync_without_ruleset, check)
313 {
314 int ret;
315
316 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0));
317
318 ret = landlock_restrict_self(-1, variant->flags);
319 if (variant->expected_errno) {
320 EXPECT_EQ(-1, ret);
321 EXPECT_EQ(variant->expected_errno, errno);
322 } else {
323 EXPECT_EQ(0, ret);
324 }
325 }
326
327 TEST_HARNESS_MAIN
328