xref: /linux/include/vdso/helpers.h (revision ed78b7b2c5ae679960469c0f679539c427e051ab)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __VDSO_HELPERS_H
3 #define __VDSO_HELPERS_H
4 
5 #ifndef __ASSEMBLY__
6 
7 #include <asm/barrier.h>
8 #include <vdso/datapage.h>
9 
10 static __always_inline u32 vdso_read_begin(const struct vdso_clock *vc)
11 {
12 	u32 seq;
13 
14 	while (unlikely((seq = READ_ONCE(vc->seq)) & 1))
15 		cpu_relax();
16 
17 	smp_rmb();
18 	return seq;
19 }
20 
21 /*
22  * Variant of vdso_read_begin() to handle VDSO_CLOCKMODE_TIMENS.
23  *
24  * Time namespace enabled tasks have a special VVAR page installed which has
25  * vc->seq set to 1 and vc->clock_mode set to VDSO_CLOCKMODE_TIMENS. For non
26  * time namespace affected tasks this does not affect performance because if
27  * vc->seq is odd, i.e. a concurrent update is in progress the extra check for
28  * vc->clock_mode is just a few extra instructions while spin waiting for
29  * vc->seq to become even again.
30  */
static __always_inline bool vdso_read_begin_timens(const struct vdso_clock *vc, u32 *seq)
{
	/* Spin while a concurrent update is in progress (odd sequence count) */
	while (unlikely((*seq = READ_ONCE(vc->seq)) & 1)) {
		/*
		 * The time namespace VVAR page keeps vc->seq pinned to 1
		 * (see comment above), so without this check the loop would
		 * spin forever. Returning true tells the caller to take the
		 * time namespace path instead.
		 */
		if (IS_ENABLED(CONFIG_TIME_NS) && vc->clock_mode == VDSO_CLOCKMODE_TIMENS)
			return true;
		cpu_relax();
	}
	/* Order the sequence load before the subsequent data loads */
	smp_rmb();

	return false;
}
42 
43 static __always_inline u32 vdso_read_retry(const struct vdso_clock *vc,
44 					   u32 start)
45 {
46 	u32 seq;
47 
48 	smp_rmb();
49 	seq = READ_ONCE(vc->seq);
50 	return seq != start;
51 }
52 
53 static __always_inline void vdso_write_seq_begin(struct vdso_clock *vc)
54 {
55 	/*
56 	 * WRITE_ONCE() is required otherwise the compiler can validly tear
57 	 * updates to vc->seq and it is possible that the value seen by the
58 	 * reader is inconsistent.
59 	 */
60 	WRITE_ONCE(vc->seq, vc->seq + 1);
61 }
62 
63 static __always_inline void vdso_write_seq_end(struct vdso_clock *vc)
64 {
65 	/*
66 	 * WRITE_ONCE() is required otherwise the compiler can validly tear
67 	 * updates to vc->seq and it is possible that the value seen by the
68 	 * reader is inconsistent.
69 	 */
70 	WRITE_ONCE(vc->seq, vc->seq + 1);
71 }
72 
static __always_inline void vdso_write_begin_clock(struct vdso_clock *vc)
{
	/* Make the sequence count odd to mark an update in progress */
	vdso_write_seq_begin(vc);
	/* Ensure the sequence invalidation is visible before data is modified */
	smp_wmb();
}
79 
static __always_inline void vdso_write_end_clock(struct vdso_clock *vc)
{
	/* Ensure the data update is visible before the sequence is set valid again */
	smp_wmb();
	/* Make the sequence count even again to mark the data consistent */
	vdso_write_seq_end(vc);
}
86 
87 static __always_inline void vdso_write_begin(struct vdso_time_data *vd)
88 {
89 	struct vdso_clock *vc = vd->clock_data;
90 
91 	vdso_write_seq_begin(&vc[CS_HRES_COARSE]);
92 	vdso_write_seq_begin(&vc[CS_RAW]);
93 	/* Ensure the sequence invalidation is visible before data is modified */
94 	smp_wmb();
95 }
96 
97 static __always_inline void vdso_write_end(struct vdso_time_data *vd)
98 {
99 	struct vdso_clock *vc = vd->clock_data;
100 
101 	/* Ensure the data update is visible before the sequence is set valid again */
102 	smp_wmb();
103 	vdso_write_seq_end(&vc[CS_HRES_COARSE]);
104 	vdso_write_seq_end(&vc[CS_RAW]);
105 }
106 
107 #endif /* !__ASSEMBLY__ */
108 
109 #endif /* __VDSO_HELPERS_H */
110