xref: /linux/include/vdso/helpers.h (revision 40286d6379aacfcc053253ef78dc78b09addffda)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __VDSO_HELPERS_H
3 #define __VDSO_HELPERS_H
4 
5 #ifndef __ASSEMBLY__
6 
7 #include <asm/barrier.h>
8 #include <vdso/datapage.h>
9 #include <vdso/processor.h>
10 #include <vdso/clocksource.h>
11 
12 static __always_inline bool vdso_is_timens_clock(const struct vdso_clock *vc)
13 {
14 	return IS_ENABLED(CONFIG_TIME_NS) && vc->clock_mode == VDSO_CLOCKMODE_TIMENS;
15 }
16 
17 static __always_inline u32 vdso_read_begin(const struct vdso_clock *vc)
18 {
19 	u32 seq;
20 
21 	while (unlikely((seq = READ_ONCE(vc->seq)) & 1))
22 		cpu_relax();
23 
24 	smp_rmb();
25 	return seq;
26 }
27 
28 /*
29  * Variant of vdso_read_begin() to handle VDSO_CLOCKMODE_TIMENS.
30  *
31  * Time namespace enabled tasks have a special VVAR page installed which has
32  * vc->seq set to 1 and vc->clock_mode set to VDSO_CLOCKMODE_TIMENS. For non
33  * time namespace affected tasks this does not affect performance because if
34  * vc->seq is odd, i.e. a concurrent update is in progress the extra check for
35  * vc->clock_mode is just a few extra instructions while spin waiting for
36  * vc->seq to become even again.
37  */
38 static __always_inline bool vdso_read_begin_timens(const struct vdso_clock *vc, u32 *seq)
39 {
40 	while (unlikely((*seq = READ_ONCE(vc->seq)) & 1)) {
41 		if (vdso_is_timens_clock(vc))
42 			return true;
43 		cpu_relax();
44 	}
45 	smp_rmb();
46 
47 	return false;
48 }
49 
50 static __always_inline u32 vdso_read_retry(const struct vdso_clock *vc,
51 					   u32 start)
52 {
53 	u32 seq;
54 
55 	smp_rmb();
56 	seq = READ_ONCE(vc->seq);
57 	return unlikely(seq != start);
58 }
59 
60 static __always_inline void vdso_write_seq_begin(struct vdso_clock *vc)
61 {
62 	/*
63 	 * WRITE_ONCE() is required otherwise the compiler can validly tear
64 	 * updates to vc->seq and it is possible that the value seen by the
65 	 * reader is inconsistent.
66 	 */
67 	WRITE_ONCE(vc->seq, vc->seq + 1);
68 }
69 
70 static __always_inline void vdso_write_seq_end(struct vdso_clock *vc)
71 {
72 	/*
73 	 * WRITE_ONCE() is required otherwise the compiler can validly tear
74 	 * updates to vc->seq and it is possible that the value seen by the
75 	 * reader is inconsistent.
76 	 */
77 	WRITE_ONCE(vc->seq, vc->seq + 1);
78 }
79 
/*
 * Open the write-side critical section for a single vDSO clock: make the
 * sequence count odd, then order the data updates after it.
 */
static __always_inline void vdso_write_begin_clock(struct vdso_clock *vc)
{
	vdso_write_seq_begin(vc);
	/* Ensure the sequence invalidation is visible before data is modified */
	smp_wmb();
}
86 
/*
 * Close the write-side critical section for a single vDSO clock: order the
 * data updates before the sequence count is made even again.
 */
static __always_inline void vdso_write_end_clock(struct vdso_clock *vc)
{
	/* Ensure the data update is visible before the sequence is set valid again */
	smp_wmb();
	vdso_write_seq_end(vc);
}
93 
94 static __always_inline void vdso_write_begin(struct vdso_time_data *vd)
95 {
96 	struct vdso_clock *vc = vd->clock_data;
97 
98 	vdso_write_seq_begin(&vc[CS_HRES_COARSE]);
99 	vdso_write_seq_begin(&vc[CS_RAW]);
100 	/* Ensure the sequence invalidation is visible before data is modified */
101 	smp_wmb();
102 }
103 
104 static __always_inline void vdso_write_end(struct vdso_time_data *vd)
105 {
106 	struct vdso_clock *vc = vd->clock_data;
107 
108 	/* Ensure the data update is visible before the sequence is set valid again */
109 	smp_wmb();
110 	vdso_write_seq_end(&vc[CS_HRES_COARSE]);
111 	vdso_write_seq_end(&vc[CS_RAW]);
112 }
113 
114 #endif /* !__ASSEMBLY__ */
115 
116 #endif /* __VDSO_HELPERS_H */
117