// SPDX-License-Identifier: GPL-2.0+
/*
 * Restartable sequences system call
 *
 * Copyright (C) 2015, Google, Inc.,
 * Paul Turner <pjt@google.com> and Andrew Hunter <ahh@google.com>
 * Copyright (C) 2015-2018, EfficiOS Inc.,
 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

/*
 * Restartable sequences are a lightweight interface that allows
 * user-level code to be executed atomically relative to scheduler
 * preemption and signal delivery. Typically used for implementing
 * per-cpu operations.
 *
 * It allows user-space to perform update operations on per-cpu data
 * without requiring heavy-weight atomic operations.
 *
 * Detailed algorithm of rseq user-space assembly sequences:
 *
 *                     init(rseq_cs)
 *                     cpu = TLS->rseq::cpu_id_start
 *   [1]               TLS->rseq::rseq_cs = rseq_cs
 *   [start_ip]        ----------------------------
 *   [2]               if (cpu != TLS->rseq::cpu_id)
 *                             goto abort_ip;
 *   [3]               <last_instruction_in_cs>
 *   [post_commit_ip]  ----------------------------
 *
 *   The address of jump target abort_ip must be outside the critical
 *   region, i.e.:
 *
 *     [abort_ip] < [start_ip]  || [abort_ip] >= [post_commit_ip]
 *
 *   Steps [2]-[3] (inclusive) need to be a sequence of instructions in
 *   userspace that can handle being interrupted between any of those
 *   instructions, and then resumed to the abort_ip.
 *
 *   1.  Userspace stores the address of the struct rseq_cs assembly
 *       block descriptor into the rseq_cs field of the registered
 *       struct rseq TLS area. This update is performed through a single
 *       store within the inline assembly instruction sequence.
 *       [start_ip]
 *
 *   2.  Userspace tests whether the current cpu_id field matches the
 *       cpu number loaded before start_ip, branching to abort_ip in
 *       case of a mismatch.
 *
 *       If the sequence is preempted or interrupted by a signal
 *       at or after start_ip and before post_commit_ip, then the kernel
 *       clears TLS->rseq::rseq_cs, and sets the user-space return
 *       ip to abort_ip before returning to user-space, so the preempted
 *       execution resumes at abort_ip.
 *
 *   3.  The final instruction of the userspace critical section before
 *       post_commit_ip is the commit. The critical section is
 *       self-terminating.
 *       [post_commit_ip]
 *
 *   4.  <success>
 *
 *   On failure at [2], or if interrupted by preempt or signal delivery
 *   between [1] and [3]:
 *
 *       [abort_ip]
 *   F1. <failure>
 */
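
/*
 * Illustrative sketch (editorial addition, not part of the kernel): the
 * shape of a user-space per-cpu counter increment built on the algorithm
 * above. A real implementation must place steps [1]-[3] in inline assembly
 * so that start_ip, post_commit_ip and abort_ip refer to fixed instruction
 * addresses, and the abort handler must be preceded by the RSEQ_SIG
 * signature passed at registration. The C-like pseudocode below only shows
 * the control flow and assumes the thread's registered struct rseq is
 * reachable as a TLS variable called __rseq_abi.
 *
 *	static struct rseq_cs cs = {
 *		.version		= 0,
 *		.start_ip		= (__u64)&&start_ip,
 *		.post_commit_offset	= (__u64)(&&post_commit_ip - &&start_ip),
 *		.abort_ip		= (__u64)&&abort_ip,
 *	};
 *
 *	for (;;) {
 *		int cpu = READ_ONCE(__rseq_abi.cpu_id_start);
 *
 *		WRITE_ONCE(__rseq_abi.rseq_cs, (__u64)&cs);	// [1]
 *	start_ip:
 *		if (READ_ONCE(__rseq_abi.cpu_id) != cpu)	// [2]
 *			goto abort_ip;
 *		percpu_counter[cpu]++;				// [3] commit
 *	post_commit_ip:
 *		break;
 *	abort_ip:	// must lie outside [start_ip, post_commit_ip)
 *		continue;	// retry on the (possibly new) CPU
 *	}
 *
 * A complete implementation also clears rseq_cs again once it has left the
 * critical section.
 */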

/* Required to select the proper per_cpu ops for rseq_stats_inc() */
#define RSEQ_BUILD_SLOW_PATH

#include <linux/debugfs.h>
#include <linux/hrtimer.h>
#include <linux/percpu.h>
#include <linux/prctl.h>
#include <linux/ratelimit.h>
#include <linux/rseq_entry.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/rseq.h>
#include <asm/ptrace.h>

#define CREATE_TRACE_POINTS
#include <trace/events/rseq.h>

DEFINE_STATIC_KEY_MAYBE(CONFIG_RSEQ_DEBUG_DEFAULT_ENABLE, rseq_debug_enabled);

static inline void rseq_control_debug(bool on)
{
	if (on)
		static_branch_enable(&rseq_debug_enabled);
	else
		static_branch_disable(&rseq_debug_enabled);
}

static int __init rseq_setup_debug(char *str)
{
	bool on;

	if (kstrtobool(str, &on))
		return -EINVAL;
	rseq_control_debug(on);
	return 1;
}
__setup("rseq_debug=", rseq_setup_debug);
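
/*
 * Example (editorial sketch): the debug checks can also be enabled from the
 * kernel command line. kstrtobool() accepts the usual forms, so
 * "rseq_debug=1", "rseq_debug=on" and "rseq_debug=y" are equivalent, as are
 * their negated counterparts.
 */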

#ifdef CONFIG_TRACEPOINTS
/*
 * Out of line, so the actual update functions can be in a header to be
 * inlined into the exit to user code.
 */
void __rseq_trace_update(struct task_struct *t)
{
	trace_rseq_update(t);
}

void __rseq_trace_ip_fixup(unsigned long ip, unsigned long start_ip,
			   unsigned long offset, unsigned long abort_ip)
{
	trace_rseq_ip_fixup(ip, start_ip, offset, abort_ip);
}
#endif /* CONFIG_TRACEPOINTS */

#ifdef CONFIG_RSEQ_STATS
DEFINE_PER_CPU(struct rseq_stats, rseq_stats);

static int rseq_stats_show(struct seq_file *m, void *p)
{
	struct rseq_stats stats = { };
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		stats.exit	+= data_race(per_cpu(rseq_stats.exit, cpu));
		stats.signal	+= data_race(per_cpu(rseq_stats.signal, cpu));
		stats.slowpath	+= data_race(per_cpu(rseq_stats.slowpath, cpu));
		stats.fastpath	+= data_race(per_cpu(rseq_stats.fastpath, cpu));
		stats.ids	+= data_race(per_cpu(rseq_stats.ids, cpu));
		stats.cs	+= data_race(per_cpu(rseq_stats.cs, cpu));
		stats.clear	+= data_race(per_cpu(rseq_stats.clear, cpu));
		stats.fixup	+= data_race(per_cpu(rseq_stats.fixup, cpu));
		if (IS_ENABLED(CONFIG_RSEQ_SLICE_EXTENSION)) {
			stats.s_granted	+= data_race(per_cpu(rseq_stats.s_granted, cpu));
			stats.s_expired	+= data_race(per_cpu(rseq_stats.s_expired, cpu));
			stats.s_revoked	+= data_race(per_cpu(rseq_stats.s_revoked, cpu));
			stats.s_yielded	+= data_race(per_cpu(rseq_stats.s_yielded, cpu));
			stats.s_aborted	+= data_race(per_cpu(rseq_stats.s_aborted, cpu));
		}
	}

	seq_printf(m, "exit:   %16lu\n", stats.exit);
	seq_printf(m, "signal: %16lu\n", stats.signal);
	seq_printf(m, "slowp:  %16lu\n", stats.slowpath);
	seq_printf(m, "fastp:  %16lu\n", stats.fastpath);
	seq_printf(m, "ids:    %16lu\n", stats.ids);
	seq_printf(m, "cs:     %16lu\n", stats.cs);
	seq_printf(m, "clear:  %16lu\n", stats.clear);
	seq_printf(m, "fixup:  %16lu\n", stats.fixup);
	if (IS_ENABLED(CONFIG_RSEQ_SLICE_EXTENSION)) {
		seq_printf(m, "sgrant: %16lu\n", stats.s_granted);
		seq_printf(m, "sexpir: %16lu\n", stats.s_expired);
		seq_printf(m, "srevok: %16lu\n", stats.s_revoked);
		seq_printf(m, "syield: %16lu\n", stats.s_yielded);
		seq_printf(m, "sabort: %16lu\n", stats.s_aborted);
	}
	return 0;
}

static int rseq_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, rseq_stats_show, inode->i_private);
}

static const struct file_operations stat_ops = {
	.open		= rseq_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init rseq_stats_init(struct dentry *root_dir)
{
	debugfs_create_file("stats", 0444, root_dir, NULL, &stat_ops);
	return 0;
}
#else
static inline void rseq_stats_init(struct dentry *root_dir) { }
#endif /* CONFIG_RSEQ_STATS */

static int rseq_debug_show(struct seq_file *m, void *p)
{
	bool on = static_branch_unlikely(&rseq_debug_enabled);

	seq_printf(m, "%d\n", on);
	return 0;
}

static ssize_t rseq_debug_write(struct file *file, const char __user *ubuf,
				size_t count, loff_t *ppos)
{
	bool on;

	if (kstrtobool_from_user(ubuf, count, &on))
		return -EINVAL;

	rseq_control_debug(on);
	return count;
}

static int rseq_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, rseq_debug_show, inode->i_private);
}

static const struct file_operations debug_ops = {
	.open		= rseq_debug_open,
	.read		= seq_read,
	.write		= rseq_debug_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void rseq_slice_ext_init(struct dentry *root_dir);

static int __init rseq_debugfs_init(void)
{
	struct dentry *root_dir = debugfs_create_dir("rseq", NULL);

	debugfs_create_file("debug", 0644, root_dir, NULL, &debug_ops);
	rseq_stats_init(root_dir);
	if (IS_ENABLED(CONFIG_RSEQ_SLICE_EXTENSION))
		rseq_slice_ext_init(root_dir);
	return 0;
}
__initcall(rseq_debugfs_init);
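
/*
 * Editorial sketch of the resulting debugfs layout (assuming debugfs is
 * mounted at the conventional /sys/kernel/debug):
 *
 *	/sys/kernel/debug/rseq/debug		- r/w, toggles rseq_debug_enabled
 *	/sys/kernel/debug/rseq/stats		- r/o, CONFIG_RSEQ_STATS only
 *	/sys/kernel/debug/rseq/slice_ext_nsec	- r/w, CONFIG_RSEQ_SLICE_EXTENSION only
 *
 * e.g. "echo 1 > /sys/kernel/debug/rseq/debug" enables the debug checks at
 * runtime.
 */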

static bool rseq_set_ids(struct task_struct *t, struct rseq_ids *ids, u32 node_id)
{
	return rseq_set_ids_get_csaddr(t, ids, node_id, NULL);
}

static bool rseq_handle_cs(struct task_struct *t, struct pt_regs *regs)
{
	struct rseq __user *urseq = t->rseq.usrptr;
	u64 csaddr;

	scoped_user_read_access(urseq, efault)
		unsafe_get_user(csaddr, &urseq->rseq_cs, efault);
	if (likely(!csaddr))
		return true;
	return rseq_update_user_cs(t, regs, csaddr);
efault:
	return false;
}

static void rseq_slowpath_update_usr(struct pt_regs *regs)
{
	/*
	 * Preserve rseq state and user_irq state. The generic entry code
	 * clears user_irq on the way out; architectures which do not use
	 * the generic entry code do not have user_irq.
	 */
	const struct rseq_event evt_mask = { .has_rseq = true, .user_irq = true, };
	struct task_struct *t = current;
	struct rseq_ids ids;
	u32 node_id;
	bool event;

	if (unlikely(t->flags & PF_EXITING))
		return;

	rseq_stat_inc(rseq_stats.slowpath);

	/*
	 * Read and clear the event pending bit first. If the task
	 * was not preempted or migrated and no signal is on the way,
	 * there is no point in doing any of the heavy lifting here
	 * on production kernels. In that case TIF_NOTIFY_RESUME
	 * was raised by some other functionality.
	 *
	 * This is correct because the read/clear operation is
	 * guarded against scheduler preemption, which makes it CPU
	 * local atomic. If the task is preempted right after
	 * re-enabling preemption then TIF_NOTIFY_RESUME is set
	 * again and this function is invoked another time _before_
	 * the task is able to return to user mode.
	 *
	 * On a debug kernel, invoke the fixup code unconditionally
	 * with the result handed in to allow the detection of
	 * inconsistencies.
	 */
	scoped_guard(irq) {
		event = t->rseq.event.sched_switch;
		t->rseq.event.all &= evt_mask.all;
		ids.cpu_id = task_cpu(t);
		ids.mm_cid = task_mm_cid(t);
	}

	if (!event)
		return;

	node_id = cpu_to_node(ids.cpu_id);

	if (unlikely(!rseq_update_usr(t, regs, &ids, node_id))) {
		/*
		 * Clear the errors just in case this might survive magically, but
		 * leave the rest intact.
		 */
		t->rseq.event.error = 0;
		force_sig(SIGSEGV);
	}
}

void __rseq_handle_slowpath(struct pt_regs *regs)
{
	/*
	 * If invoked from hypervisors before entering the guest via
	 * resume_user_mode_work(), then @regs is a NULL pointer.
	 *
	 * resume_user_mode_work() clears TIF_NOTIFY_RESUME and re-raises
	 * it before returning from the ioctl() to user space when
	 * rseq_event.sched_switch is set.
	 *
	 * So it's safe to ignore here instead of pointlessly updating it
	 * in the vcpu_run() loop.
	 */
	if (!regs)
		return;

	rseq_slowpath_update_usr(regs);
}

void __rseq_signal_deliver(int sig, struct pt_regs *regs)
{
	rseq_stat_inc(rseq_stats.signal);
	/*
	 * Don't update IDs, they are handled on exit to user if
	 * necessary. The important thing is to abort a critical section of
	 * the interrupted context as after this point the instruction
	 * pointer in @regs points to the signal handler.
	 */
	if (unlikely(!rseq_handle_cs(current, regs))) {
		/*
		 * Clear the errors just in case this might survive
		 * magically, but leave the rest intact.
		 */
		current->rseq.event.error = 0;
		force_sigsegv(sig);
	}
}

/*
 * Terminate the process if a syscall is issued within a restartable
 * sequence.
 */
void __rseq_debug_syscall_return(struct pt_regs *regs)
{
	struct task_struct *t = current;
	u64 csaddr;

	if (!t->rseq.event.has_rseq)
		return;
	if (get_user(csaddr, &t->rseq.usrptr->rseq_cs))
		goto fail;
	if (likely(!csaddr))
		return;
	if (unlikely(csaddr >= TASK_SIZE))
		goto fail;
	if (rseq_debug_update_user_cs(t, regs, csaddr))
		return;
fail:
	force_sig(SIGSEGV);
}

#ifdef CONFIG_DEBUG_RSEQ
/* Kept around to keep GENERIC_ENTRY=n architectures supported. */
void rseq_syscall(struct pt_regs *regs)
{
	__rseq_debug_syscall_return(regs);
}
#endif

static bool rseq_reset_ids(void)
{
	struct rseq_ids ids = {
		.cpu_id		= RSEQ_CPU_ID_UNINITIALIZED,
		.mm_cid		= 0,
	};

	/*
	 * If this fails, terminate the task, because a failure here leaves
	 * the kernel in an inconsistent state: exit to user space would try
	 * to fix up the ids again.
	 */
	if (rseq_set_ids(current, &ids, 0))
		return true;

	force_sig(SIGSEGV);
	return false;
}

/* The original rseq structure size (including padding) is 32 bytes. */
#define ORIG_RSEQ_SIZE		32

/*
 * sys_rseq - setup restartable sequences for caller thread.
 */
SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len, int, flags, u32, sig)
{
	u32 rseqfl = 0;

	if (flags & RSEQ_FLAG_UNREGISTER) {
		if (flags & ~RSEQ_FLAG_UNREGISTER)
			return -EINVAL;
		/* Unregister rseq for current thread. */
		if (current->rseq.usrptr != rseq || !current->rseq.usrptr)
			return -EINVAL;
		if (rseq_len != current->rseq.len)
			return -EINVAL;
		if (current->rseq.sig != sig)
			return -EPERM;
		if (!rseq_reset_ids())
			return -EFAULT;
		rseq_reset(current);
		return 0;
	}

	if (unlikely(flags & ~(RSEQ_FLAG_SLICE_EXT_DEFAULT_ON)))
		return -EINVAL;

	if (current->rseq.usrptr) {
		/*
		 * If rseq is already registered, check whether
		 * the provided address differs from the prior
		 * one.
		 */
		if (current->rseq.usrptr != rseq || rseq_len != current->rseq.len)
			return -EINVAL;
		if (current->rseq.sig != sig)
			return -EPERM;
		/* Already registered. */
		return -EBUSY;
	}

	/*
	 * If there was no rseq previously registered, ensure the provided rseq
	 * is properly aligned, as communicated to user-space through the ELF
	 * auxiliary vector AT_RSEQ_ALIGN. If rseq_len is the original rseq
	 * size, the required alignment is the original struct rseq alignment.
	 *
	 * The rseq_len is required to be greater or equal to the original rseq
	 * size. In order to be valid, rseq_len is either the original rseq size,
	 * or large enough to contain all supported fields, as communicated to
	 * user-space through the ELF auxiliary vector AT_RSEQ_FEATURE_SIZE.
	 */
	if (rseq_len < ORIG_RSEQ_SIZE ||
	    (rseq_len == ORIG_RSEQ_SIZE && !IS_ALIGNED((unsigned long)rseq, ORIG_RSEQ_SIZE)) ||
	    (rseq_len != ORIG_RSEQ_SIZE && (!IS_ALIGNED((unsigned long)rseq, rseq_alloc_align()) ||
					    rseq_len < offsetof(struct rseq, end))))
		return -EINVAL;
	if (!access_ok(rseq, rseq_len))
		return -EFAULT;

	if (IS_ENABLED(CONFIG_RSEQ_SLICE_EXTENSION)) {
		rseqfl |= RSEQ_CS_FLAG_SLICE_EXT_AVAILABLE;
		if (rseq_slice_extension_enabled() &&
		    (flags & RSEQ_FLAG_SLICE_EXT_DEFAULT_ON))
			rseqfl |= RSEQ_CS_FLAG_SLICE_EXT_ENABLED;
	}

	scoped_user_write_access(rseq, efault) {
		/*
		 * If the rseq_cs pointer is non-NULL on registration, clear it to
		 * avoid a potential segfault on return to user-space. The proper thing
		 * to do would have been to fail the registration but this would break
		 * older libcs that reuse the rseq area for new threads without
		 * clearing the fields. Don't bother reading it, just reset it.
		 */
		unsafe_put_user(0UL, &rseq->rseq_cs, efault);
		unsafe_put_user(rseqfl, &rseq->flags, efault);
		/* Initialize IDs in user space */
		unsafe_put_user(RSEQ_CPU_ID_UNINITIALIZED, &rseq->cpu_id_start, efault);
		unsafe_put_user(RSEQ_CPU_ID_UNINITIALIZED, &rseq->cpu_id, efault);
		unsafe_put_user(0U, &rseq->node_id, efault);
		unsafe_put_user(0U, &rseq->mm_cid, efault);
		unsafe_put_user(0U, &rseq->slice_ctrl.all, efault);
	}

	/*
	 * Activate the registration by setting the rseq area address, length
	 * and signature in the task struct.
	 */
	current->rseq.usrptr = rseq;
	current->rseq.len = rseq_len;
	current->rseq.sig = sig;

#ifdef CONFIG_RSEQ_SLICE_EXTENSION
	current->rseq.slice.state.enabled = !!(rseqfl & RSEQ_CS_FLAG_SLICE_EXT_ENABLED);
#endif

	/*
	 * If rseq was previously inactive, and has just been
	 * registered, ensure the cpu_id_start and cpu_id fields
	 * are updated before returning to user-space.
	 */
	current->rseq.event.has_rseq = true;
	rseq_force_update();
	return 0;

efault:
	return -EFAULT;
}
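
/*
 * Illustrative sketch (editorial addition, not part of the kernel): manual
 * registration from user space with a direct syscall, for the case where
 * the libc has not already registered an rseq area for the thread. The
 * signature value is an arbitrary example; alignment and length follow the
 * original 32-byte ABI described above rather than the extended
 * AT_RSEQ_FEATURE_SIZE layout.
 *
 *	#include <linux/rseq.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	#define RSEQ_SIG	0x53053053	// example signature
 *
 *	static __thread struct rseq rseq_area __attribute__((aligned(32)));
 *
 *	static int register_rseq(void)
 *	{
 *		// sys_rseq(rseq, rseq_len, flags, sig)
 *		return syscall(__NR_rseq, &rseq_area, 32, 0, RSEQ_SIG);
 *	}
 *
 * A thread whose libc already registered an area gets -EBUSY back for the
 * same address, length and signature, and -EINVAL/-EPERM for mismatches, as
 * implemented above; it should then use the libc-provided area instead.
 */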

#ifdef CONFIG_RSEQ_SLICE_EXTENSION
struct slice_timer {
	struct hrtimer	timer;
	void		*cookie;
};

static const unsigned int rseq_slice_ext_nsecs_min =  5 * NSEC_PER_USEC;
static const unsigned int rseq_slice_ext_nsecs_max = 50 * NSEC_PER_USEC;
unsigned int rseq_slice_ext_nsecs __read_mostly = rseq_slice_ext_nsecs_min;
static DEFINE_PER_CPU(struct slice_timer, slice_timer);
DEFINE_STATIC_KEY_TRUE(rseq_slice_extension_key);

/*
 * When the timer expires and the task is still in user space, the return
 * from interrupt will revoke the grant and schedule. If the task already
 * entered the kernel via a syscall and the timer fires before the syscall
 * work was able to cancel it, then depending on the preemption model this
 * will either reschedule on return from interrupt or in the syscall work
 * below.
 */
static enum hrtimer_restart rseq_slice_expired(struct hrtimer *tmr)
{
	struct slice_timer *st = container_of(tmr, struct slice_timer, timer);

	/*
	 * Validate that the task which armed the timer is still on the
	 * CPU. It could have been scheduled out without canceling the
	 * timer.
	 */
	if (st->cookie == current && current->rseq.slice.state.granted) {
		rseq_stat_inc(rseq_stats.s_expired);
		set_need_resched_current();
	}
	return HRTIMER_NORESTART;
}

bool __rseq_arm_slice_extension_timer(void)
{
	struct slice_timer *st = this_cpu_ptr(&slice_timer);
	struct task_struct *curr = current;

	lockdep_assert_irqs_disabled();

	/*
	 * This check prevents a task, which got a time slice extension
	 * granted, from exceeding the maximum scheduling latency when the
	 * grant expired before going out to user space. Don't bother to
	 * clear the grant here, it will be cleaned up automatically before
	 * going out to user space after being scheduled back in.
	 */
	if (unlikely(curr->rseq.slice.expires < ktime_get_mono_fast_ns())) {
		set_need_resched_current();
		return true;
	}

	/*
	 * Store the task pointer as a cookie for comparison in the timer
	 * function. This is safe as the timer is CPU local and cannot be
	 * in the expiry function at this point.
	 */
	st->cookie = curr;
	hrtimer_start(&st->timer, curr->rseq.slice.expires, HRTIMER_MODE_ABS_PINNED_HARD);
	/* Arm the syscall entry work */
	set_task_syscall_work(curr, SYSCALL_RSEQ_SLICE);
	return false;
}

static void rseq_cancel_slice_extension_timer(void)
{
	struct slice_timer *st = this_cpu_ptr(&slice_timer);

	/*
	 * st->cookie can be safely read as preemption is disabled and the
	 * timer is CPU local.
	 *
	 * As this is most probably the first expiring timer, the cancel is
	 * expensive as it has to reprogram the hardware, but that's less
	 * expensive than going through a full hrtimer_interrupt() cycle
	 * for nothing.
	 *
	 * hrtimer_try_to_cancel() is sufficient here as the timer is CPU
	 * local and once the hrtimer code disabled interrupts the timer
	 * callback cannot be running.
	 */
	if (st->cookie == current)
		hrtimer_try_to_cancel(&st->timer);
}

static inline void rseq_slice_set_need_resched(struct task_struct *curr)
{
	/*
	 * The interrupt guard is required to prevent inconsistent state in
	 * this case:
	 *
	 * set_tsk_need_resched()
	 * --> Interrupt
	 *       wakeup()
	 *         set_tsk_need_resched()
	 *         set_preempt_need_resched()
	 *     schedule_on_return()
	 *         clear_tsk_need_resched()
	 *         clear_preempt_need_resched()
	 * set_preempt_need_resched()		<- Inconsistent state
	 *
	 * This is safe vs. a remote set of TIF_NEED_RESCHED because that
	 * only sets the already set bit and does not create inconsistent
	 * state.
	 */
	scoped_guard(irq)
		set_need_resched_current();
}

static void rseq_slice_validate_ctrl(u32 expected)
{
	u32 __user *sctrl = &current->rseq.usrptr->slice_ctrl.all;
	u32 uval;

	if (get_user(uval, sctrl) || uval != expected)
		force_sig(SIGSEGV);
}

/*
 * Invoked from syscall entry if a time slice extension was granted and the
 * kernel did not clear it before user space left the critical section.
 *
 * While the recommended way to relinquish the CPU free of side effects is
 * rseq_slice_yield(2), any syscall within a granted slice terminates the
 * grant and immediately reschedules if required. This supports onion layer
 * applications, where the code requesting the grant cannot control the
 * code within the critical section.
 */
void rseq_syscall_enter_work(long syscall)
{
	struct task_struct *curr = current;
	struct rseq_slice_ctrl ctrl = { .granted = curr->rseq.slice.state.granted };

	clear_task_syscall_work(curr, SYSCALL_RSEQ_SLICE);

	if (static_branch_unlikely(&rseq_debug_enabled))
		rseq_slice_validate_ctrl(ctrl.all);

	/*
	 * The kernel might have raced, revoked the grant and updated
	 * userspace, but kept the SLICE work set.
	 */
	if (!ctrl.granted)
		return;

	/*
	 * Required to stabilize the per CPU timer pointer and to make
	 * set_tsk_need_resched() correct on PREEMPT[RT] kernels.
	 *
	 * Leaving the scope will reschedule on preemption models FULL,
	 * LAZY and RT if necessary.
	 */
	scoped_guard(preempt) {
		rseq_cancel_slice_extension_timer();
		/*
		 * Now that preemption is disabled, quickly check whether
		 * the task was already rescheduled before arriving here.
		 */
		if (!curr->rseq.event.sched_switch) {
			rseq_slice_set_need_resched(curr);

			if (syscall == __NR_rseq_slice_yield) {
				rseq_stat_inc(rseq_stats.s_yielded);
				/* Update the yielded state for syscall return */
				curr->rseq.slice.yielded = 1;
			} else {
				rseq_stat_inc(rseq_stats.s_aborted);
			}
		}
	}
	/* Reschedule on NONE/VOLUNTARY preemption models */
	cond_resched();

	/* Clear the grant in kernel state and user space */
	curr->rseq.slice.state.granted = false;
	if (put_user(0U, &curr->rseq.usrptr->slice_ctrl.all))
		force_sig(SIGSEGV);
}

int rseq_slice_extension_prctl(unsigned long arg2, unsigned long arg3)
{
	switch (arg2) {
	case PR_RSEQ_SLICE_EXTENSION_GET:
		if (arg3)
			return -EINVAL;
		return current->rseq.slice.state.enabled ? PR_RSEQ_SLICE_EXT_ENABLE : 0;

	case PR_RSEQ_SLICE_EXTENSION_SET: {
		u32 rflags, valid = RSEQ_CS_FLAG_SLICE_EXT_AVAILABLE;
		bool enable = !!(arg3 & PR_RSEQ_SLICE_EXT_ENABLE);

		if (arg3 & ~PR_RSEQ_SLICE_EXT_ENABLE)
			return -EINVAL;
		if (!rseq_slice_extension_enabled())
			return -ENOTSUPP;
		if (!current->rseq.usrptr)
			return -ENXIO;

		/* No change? */
		if (enable == !!current->rseq.slice.state.enabled)
			return 0;

		if (get_user(rflags, &current->rseq.usrptr->flags))
			goto die;

		if (current->rseq.slice.state.enabled)
			valid |= RSEQ_CS_FLAG_SLICE_EXT_ENABLED;

		if ((rflags & valid) != valid)
			goto die;

		rflags &= ~RSEQ_CS_FLAG_SLICE_EXT_ENABLED;
		rflags |= RSEQ_CS_FLAG_SLICE_EXT_AVAILABLE;
		if (enable)
			rflags |= RSEQ_CS_FLAG_SLICE_EXT_ENABLED;

		if (put_user(rflags, &current->rseq.usrptr->flags))
			goto die;

		current->rseq.slice.state.enabled = enable;
		return 0;
	}
	default:
		return -EINVAL;
	}
die:
	force_sig(SIGSEGV);
	return -EFAULT;
}
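
/*
 * Illustrative sketch (editorial addition): opting a registered thread in
 * to slice extensions via prctl(2). PR_RSEQ_SLICE_EXTENSION_SET and
 * PR_RSEQ_SLICE_EXT_ENABLE are the names used above; the top-level prctl
 * option name PR_RSEQ_SLICE_EXTENSION is an assumption, as it is not
 * visible in this file.
 *
 *	#include <sys/prctl.h>
 *
 *	static int enable_slice_extension(void)
 *	{
 *		// arg1 name assumed; arg2/arg3 match rseq_slice_extension_prctl()
 *		return prctl(PR_RSEQ_SLICE_EXTENSION,
 *			     PR_RSEQ_SLICE_EXTENSION_SET,
 *			     PR_RSEQ_SLICE_EXT_ENABLE, 0, 0);
 *	}
 *
 * -ENOTSUPP means the extension is compiled in but disabled (see the
 * rseq_slice_ext= command line option below); -ENXIO means the thread has
 * no rseq area registered yet.
 */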

/**
 * sys_rseq_slice_yield - yield the current processor free of side effects
 *			  when a task that was granted a time slice extension
 *			  is done with the critical work before being forced out.
 *
 * Return: 1 if the task successfully yielded the CPU within the granted slice.
 *         0 if the slice extension was either never granted or was revoked,
 *	     because the task exceeded the granted extension, used a syscall
 *	     other than this one, or was scheduled out earlier due to a
 *	     subsequent interrupt.
 *
 * The syscall itself does not schedule because the syscall entry work
 * already relinquishes the CPU and schedules if required.
 */
SYSCALL_DEFINE0(rseq_slice_yield)
{
	int yielded = !!current->rseq.slice.yielded;

	current->rseq.slice.yielded = 0;
	return yielded;
}
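
/*
 * Illustrative sketch (editorial addition): how a thread with slice
 * extensions enabled is expected to use them around a short critical
 * section. The exact uapi layout of struct rseq::slice_ctrl and the bit
 * used to request an extension are not visible in this file; the field
 * names below are assumptions mirroring the kernel-internal ones.
 *
 *	// Request an extension before entering the critical section.
 *	WRITE_ONCE(rseq_area.slice_ctrl.request, 1);
 *
 *	do_short_critical_work();
 *
 *	// Leave the critical section. If the kernel granted an extension
 *	// in the meantime, give the CPU back free of other side effects.
 *	WRITE_ONCE(rseq_area.slice_ctrl.request, 0);
 *	if (READ_ONCE(rseq_area.slice_ctrl.granted))
 *		syscall(__NR_rseq_slice_yield);
 *
 * Any other syscall issued while the grant is active also terminates it
 * (see rseq_syscall_enter_work() above), at the cost of an abort being
 * accounted instead of a yield.
 */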

static int rseq_slice_ext_show(struct seq_file *m, void *p)
{
	seq_printf(m, "%d\n", rseq_slice_ext_nsecs);
	return 0;
}

static ssize_t rseq_slice_ext_write(struct file *file, const char __user *ubuf,
				    size_t count, loff_t *ppos)
{
	unsigned int nsecs;

	if (kstrtouint_from_user(ubuf, count, 10, &nsecs))
		return -EINVAL;

	if (nsecs < rseq_slice_ext_nsecs_min)
		return -ERANGE;

	if (nsecs > rseq_slice_ext_nsecs_max)
		return -ERANGE;

	rseq_slice_ext_nsecs = nsecs;

	return count;
}

static int rseq_slice_ext_open(struct inode *inode, struct file *file)
{
	return single_open(file, rseq_slice_ext_show, inode->i_private);
}

static const struct file_operations slice_ext_ops = {
	.open		= rseq_slice_ext_open,
	.read		= seq_read,
	.write		= rseq_slice_ext_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void rseq_slice_ext_init(struct dentry *root_dir)
{
	debugfs_create_file("slice_ext_nsec", 0644, root_dir, NULL, &slice_ext_ops);
}
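
/*
 * Example (editorial sketch, assuming debugfs is mounted at
 * /sys/kernel/debug): the slice extension duration can be tuned at runtime
 * within the [5000, 50000] nanosecond bounds defined above, e.g.
 *
 *	echo 25000 > /sys/kernel/debug/rseq/slice_ext_nsec
 *
 * Values outside the bounds are rejected with -ERANGE.
 */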

static int __init rseq_slice_cmdline(char *str)
{
	bool on;

	if (kstrtobool(str, &on))
		return 0;

	if (!on)
		static_branch_disable(&rseq_slice_extension_key);
	return 1;
}
__setup("rseq_slice_ext=", rseq_slice_cmdline);
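
/*
 * Example (editorial sketch): booting with "rseq_slice_ext=off" (or "=0",
 * "=n") disables rseq_slice_extension_key at boot; the registration and
 * prctl() paths above then treat the feature as unavailable via
 * rseq_slice_extension_enabled().
 */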

static int __init rseq_slice_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		hrtimer_setup(per_cpu_ptr(&slice_timer.timer, cpu), rseq_slice_expired,
			      CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED_HARD);
	}
	return 0;
}
device_initcall(rseq_slice_init);
#else
static void rseq_slice_ext_init(struct dentry *root_dir) { }
#endif /* CONFIG_RSEQ_SLICE_EXTENSION */