xref: /linux/include/linux/rseq_types.h (revision 63724e9519a312d7d0b8767d0aeb53bc15a7fdd5)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_RSEQ_TYPES_H
3 #define _LINUX_RSEQ_TYPES_H
4 
#include <linux/bits.h>
#include <linux/irq_work_types.h>
#include <linux/types.h>
#include <linux/workqueue_types.h>
8 
9 #ifdef CONFIG_RSEQ
10 struct rseq;
11 
/**
 * struct rseq_event - Storage for rseq related event management
 * @all:		Compound to initialize and clear the data efficiently
 * @events:		Compound to access events with a single load/store
 * @sched_switch:	True if the task was scheduled and needs update on
 *			exit to user
 * @ids_changed:	Indicator that IDs need to be updated
 * @user_irq:		True on interrupt entry from user mode
 * @has_rseq:		True if the task has a rseq pointer installed
 * @__pad:		Padding to keep @error 16bit aligned
 * @error:		Compound error code for the slow path to analyze
 * @fatal:		User space data corrupted or invalid
 * @slowpath:		Indicator that slow path processing via TIF_NOTIFY_RESUME
 *			is required
 *
 * @sched_switch and @ids_changed must be adjacent and the combo must be
 * 16bit aligned to allow a single store, when both are set at the same
 * time in the scheduler.
 */
struct rseq_event {
	union {
		u64				all;
		struct {
			union {
				u32		events;
				struct {
					u8	sched_switch;
					u8	ids_changed;
					u8	user_irq;
					/* 4th byte of @events unused */
				};
			};

			u8			has_rseq;
			u8			__pad;
			union {
				u16		error;
				struct {
					u8	fatal;
					u8	slowpath;
				};
			};
		};
	};
};
55 
/**
 * struct rseq_ids - Cache for ids, which need to be updated
 * @cpu_cid:	Compound of @cpu_id and @mm_cid to make the
 *		compiler emit a single compare on 64-bit
 * @cpu_id:	The CPU ID which was written last to user space
 * @mm_cid:	The MM CID which was written last to user space
 *
 * @cpu_id and @mm_cid are updated when the data is written to user space.
 */
struct rseq_ids {
	union {
		u64		cpu_cid;
		struct {
			u32	cpu_id;
			u32	mm_cid;
		};
	};
};
74 
/**
 * union rseq_slice_state - Status information for rseq time slice extension
 * @state:	Compound to access the overall state with a single 16bit
 *		load/store
 * @enabled:	Time slice extension is enabled for the task
 * @granted:	Time slice extension was granted to the task
 */
union rseq_slice_state {
	u16			state;
	struct {
		u8		enabled;
		u8		granted;
	};
};
88 
/**
 * struct rseq_slice - Status information for rseq time slice extension
 * @state:	Time slice extension state
 * @expires:	The time when a grant expires
 * @yielded:	Indicator for rseq_slice_yield()
 */
struct rseq_slice {
	union rseq_slice_state	state;
	u64			expires;
	u8			yielded;
};
100 
/**
 * struct rseq_data - Storage for all rseq related data
 * @usrptr:	Pointer to the registered user space RSEQ memory
 * @len:	Length of the RSEQ region
 * @sig:	Signature of critical section abort IPs
 * @event:	Storage for event management
 * @ids:	Storage for cached CPU ID and MM CID
 * @slice:	Storage for time slice extension data. Only available
 *		when CONFIG_RSEQ_SLICE_EXTENSION is enabled
 */
struct rseq_data {
	struct rseq __user		*usrptr;
	u32				len;
	u32				sig;
	struct rseq_event		event;
	struct rseq_ids			ids;
#ifdef CONFIG_RSEQ_SLICE_EXTENSION
	struct rseq_slice		slice;
#endif
};
120 
#else /* CONFIG_RSEQ */
/* Zero size placeholder, so it can be embedded unconditionally */
struct rseq_data { };
#endif /* !CONFIG_RSEQ */
124 
#ifdef CONFIG_SCHED_MM_CID

/*
 * Flag bits encoded in the upper bits of a CID value. Judging by the
 * names: UNSET marks a CID as not assigned, ONCPU marks it as associated
 * to a CPU, TRANSIT marks a mode transition -- NOTE(review): semantics
 * inferred from the names, confirm against the MM CID management code.
 */
#define MM_CID_UNSET	BIT(31)
#define MM_CID_ONCPU	BIT(30)
#define MM_CID_TRANSIT	BIT(29)
130 
/**
 * struct sched_mm_cid - Storage for per task MM CID data
 * @active:	MM CID is active for the task
 * @cid:	The CID associated to the task either permanently or
 *		borrowed from the CPU
 * @node:	Queued in the per MM MMCID list (mm_mm_cid::user_list)
 */
struct sched_mm_cid {
	unsigned int		active;
	unsigned int		cid;
	struct hlist_node	node;
};
143 
/**
 * struct mm_cid_pcpu - Storage for per CPU MM_CID data
 * @cid:	The CID associated to the CPU either permanently or
 *		while a task with a CID is running
 *
 * Cacheline aligned on SMP so each CPU's entry lives in its own
 * cacheline.
 */
struct mm_cid_pcpu {
	unsigned int	cid;
} ____cacheline_aligned_in_smp;
152 
/**
 * struct mm_mm_cid - Storage for per MM CID data
 * @pcpu:		Per CPU storage for CIDs associated to a CPU
 * @mode:		Indicates per CPU and transition mode
 * @max_cids:		The exclusive maximum CID value for allocation and convergence
 * @irq_work:		irq_work to handle the affinity mode change case
 * @work:		Regular work to handle the affinity mode change case
 * @lock:		Spinlock to protect against affinity setting which can't take @mutex
 * @mutex:		Mutex to serialize forks and exits related to this mm
 * @user_list:		List of the MM CID users of a MM
 * @nr_cpus_allowed:	The number of CPUs in the per MM allowed CPUs map. The map
 *			is growth only.
 * @users:		The number of tasks sharing this MM. Separate from mm::mm_users
 *			as that is modified by mmget()/mm_put() by other entities which
 *			do not actually share the MM.
 * @pcpu_thrs:		Threshold for switching back from per CPU mode
 * @update_deferred:	A deferred switch back to per task mode is pending.
 *
 * The whole struct is cacheline aligned and ordered so that the hotpath
 * members share the first cacheline.
 */
struct mm_mm_cid {
	/* Hotpath read mostly members */
	struct mm_cid_pcpu	__percpu *pcpu;
	unsigned int		mode;
	unsigned int		max_cids;

	/* Rarely used. Moves @lock and @mutex into the second cacheline */
	struct irq_work		irq_work;
	struct work_struct	work;

	raw_spinlock_t		lock;
	struct mutex		mutex;
	struct hlist_head	user_list;

	/* Low frequency modified */
	unsigned int		nr_cpus_allowed;
	unsigned int		users;
	unsigned int		pcpu_thrs;
	unsigned int		update_deferred;
} ____cacheline_aligned;
#else /* CONFIG_SCHED_MM_CID */
/* Zero size placeholders, so they can be embedded unconditionally */
struct mm_mm_cid { };
struct sched_mm_cid { };
#endif /* !CONFIG_SCHED_MM_CID */
195 
196 #endif
197