xref: /linux/include/linux/rseq_types.h (revision 7f0023215262221ca08d56be2203e8a4770be033)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_RSEQ_TYPES_H
3 #define _LINUX_RSEQ_TYPES_H
4 
5 #include <linux/irq_work_types.h>
6 #include <linux/types.h>
7 #include <linux/workqueue_types.h>
8 
9 #ifdef CONFIG_RSEQ
10 struct rseq;
11 
12 /*
13  * rseq_event::has_rseq contains the ABI version number so preserving it
14  * in AND operations requires a mask.
15  */
16 #define RSEQ_HAS_RSEQ_VERSION_MASK	0xff
17 
/**
 * struct rseq_event - Storage for rseq related event management
 * @all:		Compound to initialize and clear the data efficiently
 * @events:		Compound to access events with a single load/store
 * @sched_switch:	True if the task was scheduled and needs update on
 *			exit to user
 * @ids_changed:	Indicator that IDs need to be updated
 * @user_irq:		True on interrupt entry from user mode
 * @has_rseq:		Greater than 0 if the task has a rseq pointer installed.
 *			Contains the RSEQ version number
 * @error:		Compound error code for the slow path to analyze
 * @fatal:		User space data corrupted or invalid
 * @slowpath:		Indicator that slow path processing via TIF_NOTIFY_RESUME
 *			is required
 *
 * @sched_switch and @ids_changed must be adjacent and the combo must be
 * 16bit aligned to allow a single store, when both are set at the same
 * time in the scheduler.
 *
 * The nested anonymous unions are deliberate: @all covers the whole 64-bit
 * state for one-shot init/clear, @events covers the first three flag bytes
 * for a single 32-bit load/store. Do not reorder or resize members — the
 * overlay semantics depend on the exact layout.
 */
struct rseq_event {
	union {
		u64				all;
		struct {
			union {
				u32		events;
				struct {
					/* Keep adjacent and 16-bit aligned (see above) */
					u8	sched_switch;
					u8	ids_changed;
					u8	user_irq;
				};
			};

			u8			has_rseq;
			u8			__pad;
			union {
				u16		error;
				struct {
					u8	fatal;
					u8	slowpath;
				};
			};
		};
	};
};
62 
/**
 * struct rseq_ids - Cache for ids, which need to be updated
 * @cpu_cid:	Compound of @cpu_id and @mm_cid to make the
 *		compiler emit a single compare on 64-bit
 * @cpu_id:	The CPU ID which was written last to user space
 * @mm_cid:	The MM CID which was written last to user space
 * @node_id:	The node ID which was written last to user space
 *
 * @cpu_id, @mm_cid and @node_id are updated when the data is written to
 * user space. The union overlays @cpu_cid on top of the @cpu_id/@mm_cid
 * pair, so their order and width must not change.
 */
struct rseq_ids {
	union {
		u64		cpu_cid;
		struct {
			u32	cpu_id;
			u32	mm_cid;
		};
	};
	u32			node_id;
};
83 
/**
 * union rseq_slice_state - Status information for rseq time slice extension
 * @state:	Compound to access the overall state with a single 16-bit
 *		load/store
 * @enabled:	Time slice extension is enabled for the task
 * @granted:	Time slice extension was granted to the task
 */
union rseq_slice_state {
	u16			state;
	struct {
		u8		enabled;
		u8		granted;
	};
};
97 
/**
 * struct rseq_slice - Status information for rseq time slice extension
 * @state:	Time slice extension state
 * @expires:	The time when a grant expires
 * @yielded:	Indicator for rseq_slice_yield()
 */
struct rseq_slice {
	union rseq_slice_state	state;
	u64			expires;
	u8			yielded;
};
109 
/**
 * struct rseq_data - Storage for all rseq related data
 * @usrptr:	Pointer to the registered user space RSEQ memory
 * @len:	Length of the RSEQ region
 * @sig:	Signature of critical section abort IPs
 * @event:	Storage for event management
 * @ids:	Storage for cached CPU ID and MM CID
 * @slice:	Storage for time slice extension data. Only present when
 *		CONFIG_RSEQ_SLICE_EXTENSION is enabled
 */
struct rseq_data {
	struct rseq __user		*usrptr;
	u32				len;
	u32				sig;
	struct rseq_event		event;
	struct rseq_ids			ids;
#ifdef CONFIG_RSEQ_SLICE_EXTENSION
	struct rseq_slice		slice;
#endif
};
129 
130 #else /* CONFIG_RSEQ */
/* Empty stub so struct rseq_data can be embedded unconditionally */
struct rseq_data { };
132 #endif /* !CONFIG_RSEQ */
133 
134 #ifdef CONFIG_SCHED_MM_CID
135 
136 #define MM_CID_UNSET	BIT(31)
137 #define MM_CID_ONCPU	BIT(30)
138 #define MM_CID_TRANSIT	BIT(29)
139 
/**
 * struct sched_mm_cid - Storage for per task MM CID data
 * @active:	MM CID is active for the task
 * @cid:	The CID associated to the task either permanently or
 *		borrowed from the CPU
 * @node:	List node for queueing in the per MM MMCID user list
 */
struct sched_mm_cid {
	unsigned int		active;
	unsigned int		cid;
	struct hlist_node	node;
};
152 
/**
 * struct mm_cid_pcpu - Storage for per CPU MM_CID data
 * @cid:	The CID associated to the CPU either permanently or
 *		while a task with a CID is running
 *
 * Cacheline aligned on SMP so adjacent per CPU instances do not
 * false-share a cacheline.
 */
struct mm_cid_pcpu {
	unsigned int	cid;
} ____cacheline_aligned_in_smp;
161 
/**
 * struct mm_mm_cid - Storage for per MM CID data
 * @pcpu:		Per CPU storage for CIDs associated to a CPU
 * @mode:		Indicates per CPU and transition mode
 * @max_cids:		The exclusive maximum CID value for allocation and convergence
 * @irq_work:		irq_work to handle the affinity mode change case
 * @work:		Regular work to handle the affinity mode change case
 * @lock:		Spinlock to protect against affinity setting which can't take @mutex
 * @mutex:		Mutex to serialize forks and exits related to this mm
 * @user_list:		List of the MM CID users of a MM
 * @nr_cpus_allowed:	The number of CPUs in the per MM allowed CPUs map.
 *			The map only ever grows.
 * @users:		The number of tasks sharing this MM. Separate from mm::mm_users
 *			as that is modified by mmget()/mmput() by other entities which
 *			do not actually share the MM.
 * @pcpu_thrs:		Threshold for switching back from per CPU mode
 * @update_deferred:	A deferred switch back to per task mode is pending.
 */
struct mm_mm_cid {
	/* Hotpath read mostly members */
	struct mm_cid_pcpu	__percpu *pcpu;
	unsigned int		mode;
	unsigned int		max_cids;

	/* Rarely used. Moves @lock and @mutex into the second cacheline */
	struct irq_work		irq_work;
	struct work_struct	work;

	raw_spinlock_t		lock;
	struct mutex		mutex;
	struct hlist_head	user_list;

	/* Low frequency modified */
	unsigned int		nr_cpus_allowed;
	unsigned int		users;
	unsigned int		pcpu_thrs;
	unsigned int		update_deferred;
} ____cacheline_aligned;
200 #else /* CONFIG_SCHED_MM_CID */
/* Empty stubs so these can be embedded unconditionally */
struct mm_mm_cid { };
struct sched_mm_cid { };
203 #endif /* !CONFIG_SCHED_MM_CID */
204 
205 #endif
206