/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FS_RESCTRL_INTERNAL_H
#define _FS_RESCTRL_INTERNAL_H

#include <linux/resctrl.h>
#include <linux/kernfs.h>
#include <linux/fs_context.h>
#include <linux/tick.h>

#define CQM_LIMBOCHECK_INTERVAL	1000

/**
 * cpumask_any_housekeeping() - Choose any CPU in @mask, preferring those that
 *			        aren't marked nohz_full
 * @mask:	The mask to pick a CPU from.
 * @exclude_cpu: The CPU to avoid picking.
 *
 * Returns a CPU from @mask, but not @exclude_cpu. If there are housekeeping
 * CPUs that don't use nohz_full, these are preferred. Pass
 * RESCTRL_PICK_ANY_CPU to avoid excluding any CPUs.
 *
 * When a CPU is excluded, returns >= nr_cpu_ids if no CPUs are available.
 */
static inline unsigned int
cpumask_any_housekeeping(const struct cpumask *mask, int exclude_cpu)
{
	unsigned int cpu;

	/* Try to find a CPU that isn't nohz_full to use in preference */
	if (tick_nohz_full_enabled()) {
		cpu = cpumask_any_andnot_but(mask, tick_nohz_full_mask, exclude_cpu);
		if (cpu < nr_cpu_ids)
			return cpu;
	}

	return cpumask_any_but(mask, exclude_cpu);
}
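
/*
 * Illustrative use only (sketch, not part of this interface): a deferred
 * worker can be placed on a housekeeping CPU of a domain, falling back to
 * any other CPU in the mask. The field and variable names below are
 * examples, not taken from this header.
 *
 *	cpu = cpumask_any_housekeeping(&d->hdr.cpu_mask, exclude_cpu);
 *	if (cpu < nr_cpu_ids)
 *		schedule_delayed_work_on(cpu, &d->mbm_over, delay);
 */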

struct rdt_fs_context {
	struct kernfs_fs_context	kfc;
	bool				enable_cdpl2;
	bool				enable_cdpl3;
	bool				enable_mba_mbps;
	bool				enable_debug;
};

static inline struct rdt_fs_context *rdt_fc2context(struct fs_context *fc)
{
	struct kernfs_fs_context *kfc = fc->fs_private;

	return container_of(kfc, struct rdt_fs_context, kfc);
}
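
/*
 * Illustrative use only (sketch): a mount-option handler retrieves the
 * filesystem-private context with rdt_fc2context(). The handler name below
 * is an example, not a declaration from this header.
 *
 *	static int example_parse_param(struct fs_context *fc,
 *				       struct fs_parameter *param)
 *	{
 *		struct rdt_fs_context *ctx = rdt_fc2context(fc);
 *
 *		ctx->enable_cdpl3 = true;
 *		return 0;
 *	}
 */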

/**
 * struct mon_evt - Properties of a monitor event
 * @evtid:		event id
 * @rid:		resource id for this event
 * @name:		name of the event
 * @evt_cfg:		Event configuration value that represents the
 *			memory transactions (e.g., READS_TO_LOCAL_MEM,
 *			READS_TO_REMOTE_MEM) being tracked by @evtid.
 *			Only valid if @evtid is an MBM event.
 * @configurable:	true if the event is configurable
 * @enabled:		true if the event is enabled
 */
struct mon_evt {
	enum resctrl_event_id	evtid;
	enum resctrl_res_level	rid;
	char			*name;
	u32			evt_cfg;
	bool			configurable;
	bool			enabled;
};

extern struct mon_evt mon_event_all[QOS_NUM_EVENTS];

#define for_each_mon_event(mevt) for (mevt = &mon_event_all[QOS_FIRST_EVENT];	\
				      mevt < &mon_event_all[QOS_NUM_EVENTS]; mevt++)
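
/*
 * Illustrative use only (sketch): iterate over all known events and act on
 * those that are enabled.
 *
 *	struct mon_evt *mevt;
 *
 *	for_each_mon_event(mevt) {
 *		if (!mevt->enabled)
 *			continue;
 *		pr_info("event %s on resource %d\n", mevt->name, mevt->rid);
 *	}
 */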

/**
 * struct mon_data - Monitoring details for each event file.
 * @list:            Member of the global @mon_data_kn_priv_list list.
 * @rid:             Resource id associated with the event file.
 * @evtid:           Event id associated with the event file.
 * @sum:             Set when event must be summed across multiple
 *                   domains.
 * @domid:           When @sum is zero this is the domain to which
 *                   the event file belongs. When @sum is one this
 *                   is the id of the L3 cache shared by all the
 *                   domains being summed.
 *
 * Pointed to by the kernfs kn->priv field of monitoring event files.
 * Readers and writers must hold rdtgroup_mutex.
 */
struct mon_data {
	struct list_head	list;
	enum resctrl_res_level	rid;
	enum resctrl_event_id	evtid;
	int			domid;
	bool			sum;
};

/**
 * struct rmid_read - Data passed across smp_call*() to read event count.
 * @rgrp:  Resource group for which the counter is being read. If it is a parent
 *	   resource group then its event count is summed with the count from all
 *	   its child resource groups.
 * @r:	   Resource describing the properties of the event being read.
 * @d:	   Domain that the counter should be read from. If NULL then sum all
 *	   domains in @r sharing L3 @ci.id.
 * @evtid: Which monitor event to read.
 * @first: Initialize MBM counter when true.
 * @ci:    Cacheinfo for L3. Only set when @d is NULL. Used when summing domains.
 * @is_mbm_cntr: true if "mbm_event" counter assignment mode is enabled and it
 *	   is an MBM event.
 * @err:   Error encountered when reading counter.
 * @val:   Returned value of event counter. If @rgrp is a parent resource group,
 *	   @val includes the sum of event counts from its child resource groups.
 *	   If @d is NULL, @val includes the sum of all domains in @r sharing @ci.id
 *	   (summed across child resource groups if @rgrp is a parent resource group).
 * @arch_mon_ctx: Hardware monitor allocated for this read request (MPAM only).
 */
struct rmid_read {
	struct rdtgroup		*rgrp;
	struct rdt_resource	*r;
	struct rdt_mon_domain	*d;
	enum resctrl_event_id	evtid;
	bool			first;
	struct cacheinfo	*ci;
	bool			is_mbm_cntr;
	int			err;
	u64			val;
	void			*arch_mon_ctx;
};
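
/*
 * Illustrative setup only (sketch): a struct rmid_read can be filled in and
 * handed to mon_event_count() on a CPU of the target domain. The event id
 * and the cpumask variable are examples, not requirements of this header.
 *
 *	struct rmid_read rr = {
 *		.rgrp	= rdtgrp,
 *		.r	= r,
 *		.d	= d,
 *		.evtid	= QOS_L3_MBM_TOTAL_EVENT_ID,
 *		.first	= false,
 *	};
 *
 *	smp_call_function_any(cpumask, mon_event_count, &rr, 1);
 */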

extern struct list_head resctrl_schema_all;

extern bool resctrl_mounted;

enum rdt_group_type {
	RDTCTRL_GROUP = 0,
	RDTMON_GROUP,
	RDT_NUM_GROUP,
};

/**
 * enum rdtgrp_mode - Mode of an RDT resource group
 * @RDT_MODE_SHAREABLE: This resource group allows sharing of its allocations
 * @RDT_MODE_EXCLUSIVE: No sharing of this resource group's allocations allowed
 * @RDT_MODE_PSEUDO_LOCKSETUP: Resource group will be used for Pseudo-Locking
 * @RDT_MODE_PSEUDO_LOCKED: No sharing of this resource group's allocations
 *                          allowed AND the allocations are Cache Pseudo-Locked
 * @RDT_NUM_MODES: Total number of modes
 *
 * The mode of a resource group enables control over the allowed overlap
 * between allocations associated with different resource groups (classes
 * of service). The user can modify the mode of a resource group by
 * writing to the "mode" resctrl file associated with the resource group.
 *
 * The "shareable", "exclusive", and "pseudo-locksetup" modes are set by
 * writing the appropriate text to the "mode" file. A resource group enters
 * "pseudo-locked" mode after the schemata is written while the resource
 * group is in "pseudo-locksetup" mode.
 */
enum rdtgrp_mode {
	RDT_MODE_SHAREABLE = 0,
	RDT_MODE_EXCLUSIVE,
	RDT_MODE_PSEUDO_LOCKSETUP,
	RDT_MODE_PSEUDO_LOCKED,

	/* Must be last */
	RDT_NUM_MODES,
};
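
/*
 * Illustrative user-space sequence only (sketch; the group name and schemata
 * value are examples):
 *
 *	# echo exclusive > /sys/fs/resctrl/grp1/mode
 *	# echo pseudo-locksetup > /sys/fs/resctrl/grp1/mode
 *	# echo "L2:0=0x3" > /sys/fs/resctrl/grp1/schemata
 *	# cat /sys/fs/resctrl/grp1/mode
 *	pseudo-locked
 */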

/**
 * struct mongroup - store the monitoring group's data in the resctrl filesystem.
 * @mon_data_kn:		kernfs node for the mon_data directory
 * @parent:			parent rdtgrp
 * @crdtgrp_list:		child rdtgroup node list
 * @rmid:			rmid for this rdtgroup
 */
struct mongroup {
	struct kernfs_node	*mon_data_kn;
	struct rdtgroup		*parent;
	struct list_head	crdtgrp_list;
	u32			rmid;
};

/**
 * struct rdtgroup - store the rdtgroup's data in the resctrl filesystem.
 * @kn:				kernfs node
 * @rdtgroup_list:		linked list for all rdtgroups
 * @closid:			closid for this rdtgroup
 * @cpu_mask:			CPUs assigned to this rdtgroup
 * @flags:			status bits
 * @waitcount:			how many CPUs expect to find this
 *				group when they acquire rdtgroup_mutex
 * @type:			indicates whether this rdtgroup is a
 *				monitor-only group or a ctrl_mon group
 * @mon:			mongroup related data
 * @mode:			mode of resource group
 * @mba_mbps_event:		input monitoring event id when mba_sc is enabled
 * @plr:			pseudo-locked region
 */
struct rdtgroup {
	struct kernfs_node		*kn;
	struct list_head		rdtgroup_list;
	u32				closid;
	struct cpumask			cpu_mask;
	int				flags;
	atomic_t			waitcount;
	enum rdt_group_type		type;
	struct mongroup			mon;
	enum rdtgrp_mode		mode;
	enum resctrl_event_id		mba_mbps_event;
	struct pseudo_lock_region	*plr;
};

/* rdtgroup.flags */
#define	RDT_DELETED		1

/* rftype.flags */
#define RFTYPE_FLAGS_CPUS_LIST	1

/*
 * Define the file type flags for base and info directories.
 */
#define RFTYPE_INFO			BIT(0)

#define RFTYPE_BASE			BIT(1)

#define RFTYPE_CTRL			BIT(4)

#define RFTYPE_MON			BIT(5)

#define RFTYPE_TOP			BIT(6)

#define RFTYPE_RES_CACHE		BIT(8)

#define RFTYPE_RES_MB			BIT(9)

#define RFTYPE_DEBUG			BIT(10)

#define RFTYPE_ASSIGN_CONFIG		BIT(11)

#define RFTYPE_CTRL_INFO		(RFTYPE_INFO | RFTYPE_CTRL)

#define RFTYPE_MON_INFO			(RFTYPE_INFO | RFTYPE_MON)

#define RFTYPE_TOP_INFO			(RFTYPE_INFO | RFTYPE_TOP)

#define RFTYPE_CTRL_BASE		(RFTYPE_BASE | RFTYPE_CTRL)

#define RFTYPE_MON_BASE			(RFTYPE_BASE | RFTYPE_MON)
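
/*
 * Illustrative composition only (sketch): a monitoring file that should show
 * up under a cache resource's info directory could combine flags as
 *
 *	.fflags = RFTYPE_MON_INFO | RFTYPE_RES_CACHE,
 *
 * while a control file visible in every resource group directory could use
 *
 *	.fflags = RFTYPE_CTRL_BASE,
 */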

/* List of all resource groups */
extern struct list_head rdt_all_groups;

extern int max_name_width;

/**
 * struct rftype - describe each file in the resctrl file system
 * @name:	File name
 * @mode:	Access mode
 * @kf_ops:	File operations
 * @flags:	File specific RFTYPE_FLAGS_* flags
 * @fflags:	File specific RFTYPE_* flags
 * @seq_show:	Show content of the file
 * @write:	Write to the file
 */
struct rftype {
	char			*name;
	umode_t			mode;
	const struct kernfs_ops	*kf_ops;
	unsigned long		flags;
	unsigned long		fflags;

	int (*seq_show)(struct kernfs_open_file *of,
			struct seq_file *sf, void *v);
	/*
	 * write() is the generic write callback which maps directly to
	 * kernfs write operation and overrides all other operations.
	 * Maximum write size is determined by ->max_write_len.
	 */
	ssize_t (*write)(struct kernfs_open_file *of,
			 char *buf, size_t nbytes, loff_t off);
};
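
/*
 * Illustrative entry only (sketch; the file name, ops structure and callback
 * below are made-up examples, not declarations from this header):
 *
 *	static struct rftype example_file = {
 *		.name		= "example",
 *		.mode		= 0444,
 *		.kf_ops		= &example_kf_single_ops,
 *		.fflags		= RFTYPE_CTRL_INFO,
 *		.seq_show	= example_seq_show,
 *	};
 */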

/**
 * struct mbm_state - status for each MBM counter in each domain
 * @prev_bw_bytes: Previous bytes value read for bandwidth calculation
 * @prev_bw:	The most recent bandwidth in MBps
 */
struct mbm_state {
	u64	prev_bw_bytes;
	u32	prev_bw;
};
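
/*
 * Bandwidth derivation (illustrative sketch, assuming the overflow worker
 * samples the byte counter roughly once per second): the bandwidth in MBps
 * is approximately
 *
 *	bw = (cur_bytes - prev_bw_bytes) / SZ_1M;
 *
 * after which @prev_bw_bytes is updated to cur_bytes and @prev_bw to bw.
 */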

extern struct mutex rdtgroup_mutex;

static inline const char *rdt_kn_name(const struct kernfs_node *kn)
{
	return rcu_dereference_check(kn->name, lockdep_is_held(&rdtgroup_mutex));
}

extern struct rdtgroup rdtgroup_default;

extern struct dentry *debugfs_resctrl;

extern enum resctrl_event_id mba_mbps_default_event;

void rdt_last_cmd_clear(void);

void rdt_last_cmd_puts(const char *s);

__printf(1, 2)
void rdt_last_cmd_printf(const char *fmt, ...);

struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn);

void rdtgroup_kn_unlock(struct kernfs_node *kn);
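
/*
 * Typical handler pattern (illustrative sketch): rdtgroup_kn_lock_live()
 * takes rdtgroup_mutex and returns NULL if the group backing the node has
 * been removed; rdtgroup_kn_unlock() must be called on every path, including
 * the failure path.
 *
 *	rdtgrp = rdtgroup_kn_lock_live(of->kn);
 *	if (!rdtgrp) {
 *		rdtgroup_kn_unlock(of->kn);
 *		return -ENOENT;
 *	}
 *	... operate on rdtgrp under rdtgroup_mutex ...
 *	rdtgroup_kn_unlock(of->kn);
 */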

int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name);

int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name,
			     umode_t mask);

ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off);

int rdtgroup_schemata_show(struct kernfs_open_file *of,
			   struct seq_file *s, void *v);

ssize_t rdtgroup_mba_mbps_event_write(struct kernfs_open_file *of,
				      char *buf, size_t nbytes, loff_t off);

int rdtgroup_mba_mbps_event_show(struct kernfs_open_file *of,
				 struct seq_file *s, void *v);

bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_ctrl_domain *d,
			   unsigned long cbm, int closid, bool exclusive);

unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_ctrl_domain *d,
				  unsigned long cbm);

enum rdtgrp_mode rdtgroup_mode_by_closid(int closid);

int rdtgroup_tasks_assigned(struct rdtgroup *r);

int closids_supported(void);

void closid_free(int closid);

int alloc_rmid(u32 closid);

void free_rmid(u32 closid, u32 rmid);

void resctrl_mon_resource_exit(void);

void mon_event_count(void *info);

int rdtgroup_mondata_show(struct seq_file *m, void *arg);

void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
		    struct rdt_mon_domain *d, struct rdtgroup *rdtgrp,
		    cpumask_t *cpumask, int evtid, int first);

int resctrl_mon_resource_init(void);

void mbm_setup_overflow_handler(struct rdt_mon_domain *dom,
				unsigned long delay_ms,
				int exclude_cpu);

void mbm_handle_overflow(struct work_struct *work);

bool is_mba_sc(struct rdt_resource *r);

void cqm_setup_limbo_handler(struct rdt_mon_domain *dom, unsigned long delay_ms,
			     int exclude_cpu);

void cqm_handle_limbo(struct work_struct *work);

bool has_busy_rmid(struct rdt_mon_domain *d);

void __check_limbo(struct rdt_mon_domain *d, bool force_free);

void resctrl_file_fflags_init(const char *config, unsigned long fflags);

void rdt_staged_configs_clear(void);

bool closid_allocated(unsigned int closid);

int resctrl_find_cleanest_closid(void);

void *rdt_kn_parent_priv(struct kernfs_node *kn);

int resctrl_mbm_assign_mode_show(struct kernfs_open_file *of, struct seq_file *s, void *v);

ssize_t resctrl_mbm_assign_mode_write(struct kernfs_open_file *of, char *buf,
				      size_t nbytes, loff_t off);

void resctrl_bmec_files_show(struct rdt_resource *r, struct kernfs_node *l3_mon_kn,
			     bool show);

int resctrl_num_mbm_cntrs_show(struct kernfs_open_file *of, struct seq_file *s, void *v);

int resctrl_available_mbm_cntrs_show(struct kernfs_open_file *of, struct seq_file *s,
				     void *v);

void rdtgroup_assign_cntrs(struct rdtgroup *rdtgrp);

void rdtgroup_unassign_cntrs(struct rdtgroup *rdtgrp);

int event_filter_show(struct kernfs_open_file *of, struct seq_file *seq, void *v);

ssize_t event_filter_write(struct kernfs_open_file *of, char *buf, size_t nbytes,
			   loff_t off);

int resctrl_mbm_assign_on_mkdir_show(struct kernfs_open_file *of,
				     struct seq_file *s, void *v);

ssize_t resctrl_mbm_assign_on_mkdir_write(struct kernfs_open_file *of, char *buf,
					  size_t nbytes, loff_t off);

int mbm_L3_assignments_show(struct kernfs_open_file *of, struct seq_file *s, void *v);

ssize_t mbm_L3_assignments_write(struct kernfs_open_file *of, char *buf, size_t nbytes,
				 loff_t off);

#ifdef CONFIG_RESCTRL_FS_PSEUDO_LOCK
int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp);

int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp);

bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_ctrl_domain *d, unsigned long cbm);

bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_ctrl_domain *d);

int rdt_pseudo_lock_init(void);

void rdt_pseudo_lock_release(void);

int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp);

void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp);

#else
static inline int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
{
	return -EOPNOTSUPP;
}

static inline int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
{
	return -EOPNOTSUPP;
}

static inline bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_ctrl_domain *d, unsigned long cbm)
{
	return false;
}

static inline bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_ctrl_domain *d)
{
	return false;
}

static inline int rdt_pseudo_lock_init(void) { return 0; }
static inline void rdt_pseudo_lock_release(void) { }
static inline int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
{
	return -EOPNOTSUPP;
}

static inline void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp) { }
#endif /* CONFIG_RESCTRL_FS_PSEUDO_LOCK */

#endif /* _FS_RESCTRL_INTERNAL_H */