xref: /linux/kernel/rseq.c (revision 830969e7821af377bdc1bb016929ff28c78490e8)
// SPDX-License-Identifier: GPL-2.0+
/*
 * Restartable sequences system call
 *
 * Copyright (C) 2015, Google, Inc.,
 * Paul Turner <pjt@google.com> and Andrew Hunter <ahh@google.com>
 * Copyright (C) 2015-2018, EfficiOS Inc.,
 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

/*
 * Restartable sequences are a lightweight interface that allows
 * user-level code to be executed atomically relative to scheduler
 * preemption and signal delivery. Typically used for implementing
 * per-cpu operations.
 *
 * It allows user-space to perform update operations on per-cpu data
 * without requiring heavy-weight atomic operations.
 *
 * Detailed algorithm of rseq user-space assembly sequences:
 *
 *                     init(rseq_cs)
 *                     cpu = TLS->rseq::cpu_id_start
 *   [1]               TLS->rseq::rseq_cs = rseq_cs
 *   [start_ip]        ----------------------------
 *   [2]               if (cpu != TLS->rseq::cpu_id)
 *                             goto abort_ip;
 *   [3]               <last_instruction_in_cs>
 *   [post_commit_ip]  ----------------------------
 *
 *   The address of jump target abort_ip must be outside the critical
 *   region, i.e.:
 *
 *     [abort_ip] < [start_ip]  || [abort_ip] >= [post_commit_ip]
 *
 *   Steps [2]-[3] (inclusive) need to be a sequence of instructions in
 *   userspace that can handle being interrupted between any of those
 *   instructions, and then resumed to the abort_ip.
 *
 *   1.  Userspace stores the address of the struct rseq_cs assembly
 *       block descriptor into the rseq_cs field of the registered
 *       struct rseq TLS area. This update is performed through a single
 *       store within the inline assembly instruction sequence.
 *       [start_ip]
 *
 *   2.  Userspace tests to check whether the current cpu_id field matches
 *       the cpu number loaded before start_ip, branching to abort_ip
 *       in case of a mismatch.
 *
 *       If the sequence is preempted or interrupted by a signal
 *       at or after start_ip and before post_commit_ip, then the kernel
 *       clears TLS->__rseq_abi::rseq_cs, and sets the user-space return
 *       ip to abort_ip before returning to user-space, so the preempted
 *       execution resumes at abort_ip.
 *
 *   3.  The final instruction of the user-space critical section before
 *       post_commit_ip is the commit. The critical section is
 *       self-terminating.
 *       [post_commit_ip]
 *
 *   4.  <success>
 *
 *   On failure at [2], or if interrupted by preemption or signal delivery
 *   between [1] and [3]:
 *
 *       [abort_ip]
 *   F1. <failure>
 */
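
/*
 * Example: a minimal user-space sketch of the algorithm above for x86-64,
 * modeled on the kernel selftests (tools/testing/selftests/rseq). It
 * registers the TLS area directly and increments a per-CPU counter inside
 * a restartable critical section. Error handling and the retry loop are
 * elided, and percpu_inc() is a made-up helper name. Note that glibc 2.35+
 * registers rseq itself; the direct registration below then fails and the
 * area published via __rseq_offset must be used instead.
 *
 *	#define _GNU_SOURCE
 *	#include <linux/rseq.h>
 *	#include <stdint.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	#define RSEQ_SIG	0x53053053
 *
 *	static __thread struct rseq rseq_abi;	// 32-byte aligned per UAPI
 *
 *	static int rseq_register(void)
 *	{
 *		return syscall(__NR_rseq, &rseq_abi, sizeof(rseq_abi), 0,
 *			       RSEQ_SIG);
 *	}
 *
 *	// Returns 0 on commit, -1 on abort/cpu mismatch (caller retries).
 *	static int percpu_inc(intptr_t *slots, int cpu)
 *	{
 *		__asm__ __volatile__ goto (
 *			// struct rseq_cs descriptor: version, flags,
 *			// start_ip, post_commit_offset, abort_ip
 *			".pushsection __rseq_cs, \"aw\"\n\t"
 *			".balign 32\n\t"
 *			"3:\n\t"
 *			".long 0x0, 0x0\n\t"
 *			".quad 1f, 2f - 1f, 4f\n\t"
 *			".popsection\n\t"
 *			// [1] TLS->rseq::rseq_cs = rseq_cs
 *			"leaq 3b(%%rip), %%rax\n\t"
 *			"movq %%rax, %[rseq_cs]\n\t"
 *			"1:\n\t"			// [start_ip]
 *			// [2] if (cpu != TLS->rseq::cpu_id) goto abort_ip
 *			"cmpl %[cpu], %[cpu_id]\n\t"
 *			"jnz 4f\n\t"
 *			"incq (%[slot])\n\t"		// [3] commit
 *			"2:\n\t"			// [post_commit_ip]
 *			// Abort trampoline; the four bytes preceding
 *			// abort_ip must contain the registered signature.
 *			".pushsection __rseq_failure, \"ax\"\n\t"
 *			".long 0x53053053\n\t"
 *			"4:\n\t"
 *			"jmp %l[abort]\n\t"
 *			".popsection\n\t"
 *			:
 *			: [cpu_id]  "m" (rseq_abi.cpu_id),
 *			  [rseq_cs] "m" (rseq_abi.rseq_cs),
 *			  [cpu]     "r" (cpu),
 *			  [slot]    "r" (&slots[cpu])
 *			: "memory", "cc", "rax"
 *			: abort);
 *		return 0;
 *	abort:
 *		return -1;
 *	}
 *
 * The caller reloads rseq_abi.cpu_id_start and retries whenever
 * percpu_inc() returns -1.
 */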

/* Required to select the proper per_cpu ops for rseq_stat_inc() */
#define RSEQ_BUILD_SLOW_PATH

#include <linux/debugfs.h>
#include <linux/hrtimer.h>
#include <linux/percpu.h>
#include <linux/prctl.h>
#include <linux/ratelimit.h>
#include <linux/rseq_entry.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/types.h>
#include <asm/ptrace.h>

#define CREATE_TRACE_POINTS
#include <trace/events/rseq.h>

DEFINE_STATIC_KEY_MAYBE(CONFIG_RSEQ_DEBUG_DEFAULT_ENABLE, rseq_debug_enabled);

static inline void rseq_control_debug(bool on)
{
	if (on)
		static_branch_enable(&rseq_debug_enabled);
	else
		static_branch_disable(&rseq_debug_enabled);
}

static int __init rseq_setup_debug(char *str)
{
	bool on;

	if (kstrtobool(str, &on))
		return -EINVAL;
	rseq_control_debug(on);
	return 1;
}
__setup("rseq_debug=", rseq_setup_debug);
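
/*
 * Debug mode can also be toggled at runtime by writing a boolean to the
 * "debug" file created in rseq_debugfs_init() below (with debugfs mounted
 * at the usual place, that is /sys/kernel/debug/rseq/debug).
 */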

#ifdef CONFIG_TRACEPOINTS
/*
 * Out of line, so the actual update functions can be in a header to be
 * inlined into the exit to user code.
 */
void __rseq_trace_update(struct task_struct *t)
{
	trace_rseq_update(t);
}

void __rseq_trace_ip_fixup(unsigned long ip, unsigned long start_ip,
			   unsigned long offset, unsigned long abort_ip)
{
	trace_rseq_ip_fixup(ip, start_ip, offset, abort_ip);
}
#endif /* CONFIG_TRACEPOINTS */

#ifdef CONFIG_DEBUG_FS
#ifdef CONFIG_RSEQ_STATS
DEFINE_PER_CPU(struct rseq_stats, rseq_stats);

static int rseq_stats_show(struct seq_file *m, void *p)
{
	struct rseq_stats stats = { };
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		stats.exit	+= data_race(per_cpu(rseq_stats.exit, cpu));
		stats.signal	+= data_race(per_cpu(rseq_stats.signal, cpu));
		stats.slowpath	+= data_race(per_cpu(rseq_stats.slowpath, cpu));
		stats.fastpath	+= data_race(per_cpu(rseq_stats.fastpath, cpu));
		stats.ids	+= data_race(per_cpu(rseq_stats.ids, cpu));
		stats.cs	+= data_race(per_cpu(rseq_stats.cs, cpu));
		stats.clear	+= data_race(per_cpu(rseq_stats.clear, cpu));
		stats.fixup	+= data_race(per_cpu(rseq_stats.fixup, cpu));
		if (IS_ENABLED(CONFIG_RSEQ_SLICE_EXTENSION)) {
			stats.s_granted	+= data_race(per_cpu(rseq_stats.s_granted, cpu));
			stats.s_expired	+= data_race(per_cpu(rseq_stats.s_expired, cpu));
			stats.s_revoked	+= data_race(per_cpu(rseq_stats.s_revoked, cpu));
			stats.s_yielded	+= data_race(per_cpu(rseq_stats.s_yielded, cpu));
			stats.s_aborted	+= data_race(per_cpu(rseq_stats.s_aborted, cpu));
		}
	}

	seq_printf(m, "exit:   %16lu\n", stats.exit);
	seq_printf(m, "signal: %16lu\n", stats.signal);
	seq_printf(m, "slowp:  %16lu\n", stats.slowpath);
	seq_printf(m, "fastp:  %16lu\n", stats.fastpath);
	seq_printf(m, "ids:    %16lu\n", stats.ids);
	seq_printf(m, "cs:     %16lu\n", stats.cs);
	seq_printf(m, "clear:  %16lu\n", stats.clear);
	seq_printf(m, "fixup:  %16lu\n", stats.fixup);
	if (IS_ENABLED(CONFIG_RSEQ_SLICE_EXTENSION)) {
		seq_printf(m, "sgrant: %16lu\n", stats.s_granted);
		seq_printf(m, "sexpir: %16lu\n", stats.s_expired);
		seq_printf(m, "srevok: %16lu\n", stats.s_revoked);
		seq_printf(m, "syield: %16lu\n", stats.s_yielded);
		seq_printf(m, "sabort: %16lu\n", stats.s_aborted);
	}
	return 0;
}

static int rseq_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, rseq_stats_show, inode->i_private);
}

static const struct file_operations stat_ops = {
	.open		= rseq_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

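/* Expose the aggregated counters via the "stats" file in the rseq debugfs directory. */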
static int __init rseq_stats_init(struct dentry *root_dir)
{
	debugfs_create_file("stats", 0444, root_dir, NULL, &stat_ops);
	return 0;
}
#else
static inline int rseq_stats_init(struct dentry *root_dir) { return 0; }
#endif /* CONFIG_RSEQ_STATS */

static int rseq_debug_show(struct seq_file *m, void *p)
{
	bool on = static_branch_unlikely(&rseq_debug_enabled);

	seq_printf(m, "%d\n", on);
	return 0;
}

static ssize_t rseq_debug_write(struct file *file, const char __user *ubuf,
				size_t count, loff_t *ppos)
{
	bool on;

	if (kstrtobool_from_user(ubuf, count, &on))
		return -EINVAL;

	rseq_control_debug(on);
	return count;
}

static int rseq_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, rseq_debug_show, inode->i_private);
}

static const struct file_operations debug_ops = {
	.open		= rseq_debug_open,
	.read		= seq_read,
	.write		= rseq_debug_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init rseq_debugfs_init(void)
{
	struct dentry *root_dir = debugfs_create_dir("rseq", NULL);

	debugfs_create_file("debug", 0644, root_dir, NULL, &debug_ops);
	rseq_stats_init(root_dir);
	return 0;
}
__initcall(rseq_debugfs_init);
#endif /* CONFIG_DEBUG_FS */

static bool rseq_set_ids(struct task_struct *t, struct rseq_ids *ids, u32 node_id)
{
	return rseq_set_ids_get_csaddr(t, ids, node_id, NULL);
}

static bool rseq_handle_cs(struct task_struct *t, struct pt_regs *regs)
{
	struct rseq __user *urseq = t->rseq.usrptr;
	u64 csaddr;

	scoped_user_read_access(urseq, efault)
		unsafe_get_user(csaddr, &urseq->rseq_cs, efault);
	if (likely(!csaddr))
		return true;
	return rseq_update_user_cs(t, regs, csaddr);
efault:
	return false;
}

static void rseq_slowpath_update_usr(struct pt_regs *regs)
{
	/*
	 * Preserve the rseq state and the user_irq state. The generic
	 * entry code clears user_irq on the way out; architectures not
	 * using the generic entry code do not have user_irq.
	 */
	const struct rseq_event evt_mask = { .has_rseq = true, .user_irq = true, };
	struct task_struct *t = current;
	struct rseq_ids ids;
	u32 node_id;
	bool event;

	if (unlikely(t->flags & PF_EXITING))
		return;

	rseq_stat_inc(rseq_stats.slowpath);

	/*
	 * Read and clear the event pending bit first. If the task was
	 * neither preempted nor migrated and no signal is on the way,
	 * there is no point in doing any of the heavy lifting here
	 * on production kernels. In that case TIF_NOTIFY_RESUME
	 * was raised by some other functionality.
	 *
	 * This is correct because the read/clear operation is
	 * guarded against scheduler preemption, which makes it a
	 * CPU-local atomic operation. If the task is preempted right
	 * after re-enabling preemption then TIF_NOTIFY_RESUME is set
	 * again and this function is invoked another time _before_
	 * the task is able to return to user mode.
	 *
	 * On a debug kernel, invoke the fixup code unconditionally
	 * with the result handed in to allow the detection of
	 * inconsistencies.
	 */
	scoped_guard(irq) {
		event = t->rseq.event.sched_switch;
		t->rseq.event.all &= evt_mask.all;
		ids.cpu_id = task_cpu(t);
		ids.mm_cid = task_mm_cid(t);
	}

	if (!event)
		return;

	node_id = cpu_to_node(ids.cpu_id);

	if (unlikely(!rseq_update_usr(t, regs, &ids, node_id))) {
		/*
		 * Clear the errors just in case this might survive
		 * magically, but leave the rest intact.
		 */
		t->rseq.event.error = 0;
		force_sig(SIGSEGV);
	}
}

void __rseq_handle_slowpath(struct pt_regs *regs)
{
	/*
	 * If invoked from hypervisors before entering the guest via
	 * resume_user_mode_work(), then @regs is a NULL pointer.
	 *
	 * resume_user_mode_work() clears TIF_NOTIFY_RESUME and re-raises
	 * it before returning from the ioctl() to user space when
	 * rseq_event.sched_switch is set.
	 *
	 * So it's safe to ignore here instead of pointlessly updating it
	 * in the vcpu_run() loop.
	 */
	if (!regs)
		return;

	rseq_slowpath_update_usr(regs);
}

void __rseq_signal_deliver(int sig, struct pt_regs *regs)
{
	rseq_stat_inc(rseq_stats.signal);
	/*
	 * Don't update IDs, they are handled on exit to user if
	 * necessary. The important thing is to abort a critical section of
	 * the interrupted context as after this point the instruction
	 * pointer in @regs points to the signal handler.
	 */
	if (unlikely(!rseq_handle_cs(current, regs))) {
		/*
		 * Clear the errors just in case this might survive
		 * magically, but leave the rest intact.
		 */
		current->rseq.event.error = 0;
		force_sigsegv(sig);
	}
}

/*
 * Terminate the process if a syscall is issued within a restartable
 * sequence.
 */
void __rseq_debug_syscall_return(struct pt_regs *regs)
{
	struct task_struct *t = current;
	u64 csaddr;

	if (!t->rseq.event.has_rseq)
		return;
	if (get_user(csaddr, &t->rseq.usrptr->rseq_cs))
		goto fail;
	if (likely(!csaddr))
		return;
	if (unlikely(csaddr >= TASK_SIZE))
		goto fail;
	if (rseq_debug_update_user_cs(t, regs, csaddr))
		return;
fail:
	force_sig(SIGSEGV);
}

#ifdef CONFIG_DEBUG_RSEQ
/* Kept around to keep GENERIC_ENTRY=n architectures supported. */
void rseq_syscall(struct pt_regs *regs)
{
	__rseq_debug_syscall_return(regs);
}
#endif

static bool rseq_reset_ids(void)
{
	struct rseq_ids ids = {
		.cpu_id		= RSEQ_CPU_ID_UNINITIALIZED,
		.mm_cid		= 0,
	};

	/*
	 * If this fails, terminate the task, as a failed reset leaves
	 * the kernel in an inconsistent state: exit to user space would
	 * try to fix up the IDs again.
	 */
	if (rseq_set_ids(current, &ids, 0))
		return true;

	force_sig(SIGSEGV);
	return false;
}

/* The original rseq structure size (including padding) is 32 bytes. */
#define ORIG_RSEQ_SIZE		32

/*
 * sys_rseq - setup restartable sequences for caller thread.
 */
SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len, int, flags, u32, sig)
{
	u32 rseqfl = 0;

	if (flags & RSEQ_FLAG_UNREGISTER) {
		if (flags & ~RSEQ_FLAG_UNREGISTER)
			return -EINVAL;
		/* Unregister rseq for current thread. */
		if (current->rseq.usrptr != rseq || !current->rseq.usrptr)
			return -EINVAL;
		if (rseq_len != current->rseq.len)
			return -EINVAL;
		if (current->rseq.sig != sig)
			return -EPERM;
		if (!rseq_reset_ids())
			return -EFAULT;
		rseq_reset(current);
		return 0;
	}

	if (unlikely(flags))
		return -EINVAL;

	if (current->rseq.usrptr) {
		/*
		 * If rseq is already registered, check whether
		 * the provided address differs from the prior
		 * one.
		 */
		if (current->rseq.usrptr != rseq || rseq_len != current->rseq.len)
			return -EINVAL;
		if (current->rseq.sig != sig)
			return -EPERM;
		/* Already registered. */
		return -EBUSY;
	}

	/*
	 * If there was no rseq previously registered, ensure the provided rseq
	 * is properly aligned, as communicated to user-space through the ELF
	 * auxiliary vector AT_RSEQ_ALIGN. If rseq_len is the original rseq
	 * size, the required alignment is the original struct rseq alignment.
	 *
	 * In order to be valid, rseq_len is either the original rseq size, or
	 * large enough to contain all supported fields, as communicated to
	 * user-space through the ELF auxiliary vector AT_RSEQ_FEATURE_SIZE.
	 */
	if (rseq_len < ORIG_RSEQ_SIZE ||
	    (rseq_len == ORIG_RSEQ_SIZE && !IS_ALIGNED((unsigned long)rseq, ORIG_RSEQ_SIZE)) ||
	    (rseq_len != ORIG_RSEQ_SIZE && (!IS_ALIGNED((unsigned long)rseq, __alignof__(*rseq)) ||
					    rseq_len < offsetof(struct rseq, end))))
		return -EINVAL;
	if (!access_ok(rseq, rseq_len))
		return -EFAULT;

	if (IS_ENABLED(CONFIG_RSEQ_SLICE_EXTENSION))
		rseqfl |= RSEQ_CS_FLAG_SLICE_EXT_AVAILABLE;

	scoped_user_write_access(rseq, efault) {
		/*
		 * If the rseq_cs pointer is non-NULL on registration, clear it to
		 * avoid a potential segfault on return to user-space. The proper thing
		 * to do would have been to fail the registration but this would break
		 * older libcs that reuse the rseq area for new threads without
		 * clearing the fields. Don't bother reading it, just reset it.
		 */
		unsafe_put_user(0UL, &rseq->rseq_cs, efault);
		unsafe_put_user(rseqfl, &rseq->flags, efault);
		/* Initialize IDs in user space */
		unsafe_put_user(RSEQ_CPU_ID_UNINITIALIZED, &rseq->cpu_id_start, efault);
		unsafe_put_user(RSEQ_CPU_ID_UNINITIALIZED, &rseq->cpu_id, efault);
		unsafe_put_user(0U, &rseq->node_id, efault);
		unsafe_put_user(0U, &rseq->mm_cid, efault);
		unsafe_put_user(0U, &rseq->slice_ctrl.all, efault);
	}

	/*
	 * Activate the registration by setting the rseq area address, length
	 * and signature in the task struct.
	 */
	current->rseq.usrptr = rseq;
	current->rseq.len = rseq_len;
	current->rseq.sig = sig;

	/*
	 * If rseq was previously inactive, and has just been
	 * registered, ensure the cpu_id_start and cpu_id fields
	 * are updated before returning to user-space.
	 */
	current->rseq.event.has_rseq = true;
	rseq_force_update();
	return 0;

efault:
	return -EFAULT;
}

#ifdef CONFIG_RSEQ_SLICE_EXTENSION
struct slice_timer {
	struct hrtimer	timer;
	void		*cookie;
};

unsigned int rseq_slice_ext_nsecs __read_mostly = 10 * NSEC_PER_USEC;
static DEFINE_PER_CPU(struct slice_timer, slice_timer);
DEFINE_STATIC_KEY_TRUE(rseq_slice_extension_key);

/*
 * When the timer expires and the task is still in user space, the return
 * from interrupt will revoke the grant and schedule. If the task already
 * entered the kernel via a syscall and the timer fires before the syscall
 * work was able to cancel it, then depending on the preemption model this
 * will either reschedule on return from interrupt or in the syscall work
 * below.
 */
static enum hrtimer_restart rseq_slice_expired(struct hrtimer *tmr)
{
	struct slice_timer *st = container_of(tmr, struct slice_timer, timer);

	/*
	 * Validate that the task which armed the timer is still on the
	 * CPU. It could have been scheduled out without canceling the
	 * timer.
	 */
	if (st->cookie == current && current->rseq.slice.state.granted) {
		rseq_stat_inc(rseq_stats.s_expired);
		set_need_resched_current();
	}
	return HRTIMER_NORESTART;
}

bool __rseq_arm_slice_extension_timer(void)
{
	struct slice_timer *st = this_cpu_ptr(&slice_timer);
	struct task_struct *curr = current;

	lockdep_assert_irqs_disabled();

	/*
	 * This check prevents a task that was granted a time slice
	 * extension from exceeding the maximum scheduling latency when
	 * the grant expired before it went out to user space. Don't
	 * bother to clear the grant here, it will be cleaned up
	 * automatically before going out to user space after being
	 * scheduled back in.
	 */
	if (unlikely(curr->rseq.slice.expires < ktime_get_mono_fast_ns())) {
		set_need_resched_current();
		return true;
	}

	/*
	 * Store the task pointer as a cookie for comparison in the timer
	 * function. This is safe as the timer is CPU local and cannot be
	 * in the expiry function at this point.
	 */
	st->cookie = curr;
	hrtimer_start(&st->timer, curr->rseq.slice.expires, HRTIMER_MODE_ABS_PINNED_HARD);
	/* Arm the syscall entry work */
	set_task_syscall_work(curr, SYSCALL_RSEQ_SLICE);
	return false;
}

static void rseq_cancel_slice_extension_timer(void)
{
	struct slice_timer *st = this_cpu_ptr(&slice_timer);

	/*
	 * st->cookie can be safely read as preemption is disabled and the
	 * timer is CPU local.
	 *
	 * As this is most probably the first expiring timer, the cancel is
	 * expensive as it has to reprogram the hardware, but that's less
	 * expensive than going through a full hrtimer_interrupt() cycle
	 * for nothing.
	 *
	 * hrtimer_try_to_cancel() is sufficient here as the timer is CPU
	 * local and once the hrtimer code disabled interrupts the timer
	 * callback cannot be running.
	 */
	if (st->cookie == current)
		hrtimer_try_to_cancel(&st->timer);
}

static inline void rseq_slice_set_need_resched(struct task_struct *curr)
{
	/*
	 * The interrupt guard is required to prevent inconsistent state in
	 * this case:
	 *
	 * set_tsk_need_resched()
	 * --> Interrupt
	 *       wakeup()
	 *         set_tsk_need_resched()
	 *         set_preempt_need_resched()
	 *     schedule_on_return()
	 *       clear_tsk_need_resched()
	 *       clear_preempt_need_resched()
	 * set_preempt_need_resched()		<- Inconsistent state
	 *
	 * This is safe vs. a remote set of TIF_NEED_RESCHED because that
	 * only sets the already set bit and does not create inconsistent
	 * state.
	 */
	scoped_guard(irq)
		set_need_resched_current();
}

static void rseq_slice_validate_ctrl(u32 expected)
{
	u32 __user *sctrl = &current->rseq.usrptr->slice_ctrl.all;
	u32 uval;

	if (get_user(uval, sctrl) || uval != expected)
		force_sig(SIGSEGV);
}

/*
 * Invoked from syscall entry if a time slice extension was granted and the
 * kernel did not clear it before user space left the critical section.
 *
 * While the recommended way to relinquish the CPU free of side effects is
 * rseq_slice_yield(2), any syscall within a granted slice terminates the
 * grant and immediately reschedules if required. This supports onion layer
 * applications, where the code requesting the grant cannot control the
 * code within the critical section.
 */
void rseq_syscall_enter_work(long syscall)
{
	struct task_struct *curr = current;
	struct rseq_slice_ctrl ctrl = { .granted = curr->rseq.slice.state.granted };

	clear_task_syscall_work(curr, SYSCALL_RSEQ_SLICE);

	if (static_branch_unlikely(&rseq_debug_enabled))
		rseq_slice_validate_ctrl(ctrl.all);

	/*
	 * The kernel might have raced, revoked the grant and updated
	 * userspace, but kept the SLICE work set.
	 */
	if (!ctrl.granted)
		return;

	/*
	 * Required to stabilize the per CPU timer pointer and to make
	 * set_tsk_need_resched() correct on PREEMPT[RT] kernels.
	 *
	 * Leaving the scope will reschedule on preemption models FULL,
	 * LAZY and RT if necessary.
	 */
	scoped_guard(preempt) {
		rseq_cancel_slice_extension_timer();
		/*
		 * Now that preemption is disabled, quickly check whether
		 * the task was already rescheduled before arriving here.
		 */
		if (!curr->rseq.event.sched_switch) {
			rseq_slice_set_need_resched(curr);

			if (syscall == __NR_rseq_slice_yield) {
				rseq_stat_inc(rseq_stats.s_yielded);
				/* Update the yielded state for syscall return */
				curr->rseq.slice.yielded = 1;
			} else {
				rseq_stat_inc(rseq_stats.s_aborted);
			}
		}
	}
	/* Reschedule on NONE/VOLUNTARY preemption models */
	cond_resched();

	/* Clear the grant in kernel state and user space */
	curr->rseq.slice.state.granted = false;
	if (put_user(0U, &curr->rseq.usrptr->slice_ctrl.all))
		force_sig(SIGSEGV);
}

int rseq_slice_extension_prctl(unsigned long arg2, unsigned long arg3)
{
	switch (arg2) {
	case PR_RSEQ_SLICE_EXTENSION_GET:
		if (arg3)
			return -EINVAL;
		return current->rseq.slice.state.enabled ? PR_RSEQ_SLICE_EXT_ENABLE : 0;

	case PR_RSEQ_SLICE_EXTENSION_SET: {
		u32 rflags, valid = RSEQ_CS_FLAG_SLICE_EXT_AVAILABLE;
		bool enable = !!(arg3 & PR_RSEQ_SLICE_EXT_ENABLE);

		if (arg3 & ~PR_RSEQ_SLICE_EXT_ENABLE)
			return -EINVAL;
		if (!rseq_slice_extension_enabled())
			return -ENOTSUPP;
		if (!current->rseq.usrptr)
			return -ENXIO;

		/* No change? */
		if (enable == !!current->rseq.slice.state.enabled)
			return 0;

		if (get_user(rflags, &current->rseq.usrptr->flags))
			goto die;

		if (current->rseq.slice.state.enabled)
			valid |= RSEQ_CS_FLAG_SLICE_EXT_ENABLED;

		if ((rflags & valid) != valid)
			goto die;

		rflags &= ~RSEQ_CS_FLAG_SLICE_EXT_ENABLED;
		rflags |= RSEQ_CS_FLAG_SLICE_EXT_AVAILABLE;
		if (enable)
			rflags |= RSEQ_CS_FLAG_SLICE_EXT_ENABLED;

		if (put_user(rflags, &current->rseq.usrptr->flags))
			goto die;

		current->rseq.slice.state.enabled = enable;
		return 0;
	}
	default:
		return -EINVAL;
	}
die:
	force_sig(SIGSEGV);
	return -EFAULT;
}
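
/*
 * Example: enabling the extension from user space. This is a sketch; it
 * assumes the prctl() option itself is named PR_RSEQ_SLICE_EXTENSION and
 * that rseq is already registered (glibc 2.35+ does that on thread start):
 *
 *	#include <sys/prctl.h>
 *
 *	if (prctl(PR_RSEQ_SLICE_EXTENSION, PR_RSEQ_SLICE_EXTENSION_SET,
 *		  PR_RSEQ_SLICE_EXT_ENABLE, 0, 0) < 0)
 *		perror("slice extension unavailable");
 */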

/**
 * sys_rseq_slice_yield - yield the current processor free of side effects
 *			  when a task that was granted a time slice extension
 *			  is done with the critical work before being forced out
 *
 * Return: 1 if the task successfully yielded the CPU within the granted slice.
 *         0 if the slice extension was never granted, or was revoked because
 *	     the task exceeded the granted extension, issued a syscall other
 *	     than this one, or was scheduled out earlier due to a subsequent
 *	     interrupt.
 *
 * The syscall itself does not schedule because the syscall entry work
 * already relinquishes the CPU and schedules if required.
 */
SYSCALL_DEFINE0(rseq_slice_yield)
{
	int yielded = !!current->rseq.slice.yielded;

	current->rseq.slice.yielded = 0;
	return yielded;
}
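
/*
 * Example: the intended user-space pattern around a critical section, as a
 * sketch. rseq points to the thread's registered struct rseq; the name of
 * the request bit in slice_ctrl is an assumption here (only slice_ctrl.all
 * and the granted state are visible in this file), and rseq_slice_yield()
 * is invoked via syscall(2) as no libc wrapper exists:
 *
 *	rseq->slice_ctrl.request = 1;		// hypothetical request bit
 *	critical_section();
 *	rseq->slice_ctrl.request = 0;
 *	if (rseq->slice_ctrl.granted)		// kernel granted an extension
 *		syscall(__NR_rseq_slice_yield);	// relinquish side effect free
 */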

#ifdef CONFIG_SYSCTL
static const unsigned int rseq_slice_ext_nsecs_min = 10 * NSEC_PER_USEC;
static const unsigned int rseq_slice_ext_nsecs_max = 50 * NSEC_PER_USEC;

static const struct ctl_table rseq_slice_ext_sysctl[] = {
	{
		.procname	= "rseq_slice_extension_nsec",
		.data		= &rseq_slice_ext_nsecs,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_douintvec_minmax,
		.extra1		= (unsigned int *)&rseq_slice_ext_nsecs_min,
		.extra2		= (unsigned int *)&rseq_slice_ext_nsecs_max,
	},
};

static void rseq_slice_sysctl_init(void)
{
	if (rseq_slice_extension_enabled())
		register_sysctl_init("kernel", rseq_slice_ext_sysctl);
}
#else /* CONFIG_SYSCTL */
static inline void rseq_slice_sysctl_init(void) { }
#endif /* !CONFIG_SYSCTL */

static int __init rseq_slice_cmdline(char *str)
{
	bool on;

	if (kstrtobool(str, &on))
		return 0;

	if (!on)
		static_branch_disable(&rseq_slice_extension_key);
	return 1;
}
__setup("rseq_slice_ext=", rseq_slice_cmdline);

static int __init rseq_slice_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		hrtimer_setup(per_cpu_ptr(&slice_timer.timer, cpu), rseq_slice_expired,
			      CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED_HARD);
	}
	rseq_slice_sysctl_init();
	return 0;
}
device_initcall(rseq_slice_init);
#endif /* CONFIG_RSEQ_SLICE_EXTENSION */
807