/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FS_RESCTRL_INTERNAL_H
#define _FS_RESCTRL_INTERNAL_H

#include <linux/resctrl.h>
#include <linux/kernfs.h>
#include <linux/fs_context.h>
#include <linux/tick.h>

#define CQM_LIMBOCHECK_INTERVAL	1000

/**
 * cpumask_any_housekeeping() - Choose any CPU in @mask, preferring those that
 *			        aren't marked nohz_full
 * @mask:	The mask to pick a CPU from.
 * @exclude_cpu: The CPU to avoid picking.
 *
 * Returns a CPU from @mask, but not @exclude_cpu. If there are housekeeping
 * CPUs that don't use nohz_full, these are preferred. Pass
 * RESCTRL_PICK_ANY_CPU to avoid excluding any CPUs.
 *
 * When a CPU is excluded, returns >= nr_cpu_ids if no CPUs are available.
 */
static inline unsigned int
cpumask_any_housekeeping(const struct cpumask *mask, int exclude_cpu)
{
	unsigned int cpu;

	/* Try to find a CPU that isn't nohz_full to use in preference */
	if (tick_nohz_full_enabled()) {
		cpu = cpumask_any_andnot_but(mask, tick_nohz_full_mask, exclude_cpu);
		if (cpu < nr_cpu_ids)
			return cpu;
	}

	return cpumask_any_but(mask, exclude_cpu);
}
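
/*
 * Illustrative sketch (not part of this header's API): a caller that needs to
 * run a counter read on a CPU of a domain would typically prefer a
 * housekeeping CPU so that nohz_full CPUs are not interrupted. The function
 * name, domain mask, read callback and argument below are hypothetical; only
 * cpumask_any_housekeeping() and smp_call_function_single() are real.
 *
 *	static int read_counter_on_domain_cpu(const struct cpumask *domain_mask,
 *					      smp_call_func_t read_fn, void *arg)
 *	{
 *		unsigned int cpu;
 *
 *		cpu = cpumask_any_housekeeping(domain_mask, RESCTRL_PICK_ANY_CPU);
 *		if (cpu >= nr_cpu_ids)
 *			return -ENODEV;
 *
 *		return smp_call_function_single(cpu, read_fn, arg, 1);
 *	}
 */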

struct rdt_fs_context {
	struct kernfs_fs_context	kfc;
	bool				enable_cdpl2;
	bool				enable_cdpl3;
	bool				enable_mba_mbps;
	bool				enable_debug;
};

static inline struct rdt_fs_context *rdt_fc2context(struct fs_context *fc)
{
	struct kernfs_fs_context *kfc = fc->fs_private;

	return container_of(kfc, struct rdt_fs_context, kfc);
}
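
/*
 * Illustrative sketch: mount option handling retrieves the resctrl specific
 * context from the generic fs_context with rdt_fc2context(). The handler name
 * and the option string below are hypothetical; the real option table and
 * parsing live in rdtgroup.c.
 *
 *	static int example_parse_param(struct fs_context *fc,
 *				       struct fs_parameter *param)
 *	{
 *		struct rdt_fs_context *ctx = rdt_fc2context(fc);
 *
 *		if (!strcmp(param->key, "cdpl2"))
 *			ctx->enable_cdpl2 = true;
 *
 *		return 0;
 *	}
 */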

/**
 * struct mon_evt - Properties of a monitor event
 * @evtid:		event id
 * @rid:		resource id for this event
 * @name:		name of the event
 * @evt_cfg:		Event configuration value that represents the
 *			memory transactions (e.g., READS_TO_LOCAL_MEM,
 *			READS_TO_REMOTE_MEM) being tracked by @evtid.
 *			Only valid if @evtid is an MBM event.
 * @configurable:	true if the event is configurable
 * @any_cpu:		true if the event can be read from any CPU
 * @is_floating_point:	event values are displayed in floating point format
 * @binary_bits:	number of fixed-point binary bits from architecture,
 *			only valid if @is_floating_point is true
 * @enabled:		true if the event is enabled
 * @arch_priv:		Architecture private data for this event. This is
 *			the @arch_priv value provided by the architecture
 *			via resctrl_enable_mon_event().
 */
struct mon_evt {
	enum resctrl_event_id	evtid;
	enum resctrl_res_level	rid;
	char			*name;
	u32			evt_cfg;
	bool			configurable;
	bool			any_cpu;
	bool			is_floating_point;
	unsigned int		binary_bits;
	bool			enabled;
	void			*arch_priv;
};

extern struct mon_evt mon_event_all[QOS_NUM_EVENTS];

#define for_each_mon_event(mevt) for (mevt = &mon_event_all[QOS_FIRST_EVENT];	\
				      mevt < &mon_event_all[QOS_NUM_EVENTS]; mevt++)
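
/*
 * Illustrative sketch: walking the global event table with
 * for_each_mon_event() while skipping events that are not enabled. The
 * action performed on each event is hypothetical.
 *
 *	struct mon_evt *mevt;
 *
 *	for_each_mon_event(mevt) {
 *		if (!mevt->enabled)
 *			continue;
 *		pr_debug("event %s (id %d) is enabled\n", mevt->name, mevt->evtid);
 *	}
 */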

/* Limit for mon_evt::binary_bits */
#define MAX_BINARY_BITS	27

/**
 * struct mon_data - Monitoring details for each event file.
 * @list:            Member of the global @mon_data_kn_priv_list list.
 * @rid:             Resource id associated with the event file.
 * @evt:             Event structure associated with the event file.
 * @sum:             Set for RDT_RESOURCE_L3 when the event must be summed
 *                   across multiple domains.
 * @domid:           When @sum is zero this is the id of the domain to which
 *                   the event file belongs. When @sum is one this is the id
 *                   of the L3 cache shared by all of the domains to be summed.
 *
 * Pointed to by the kernfs kn->priv field of monitoring event files.
 * Readers and writers must hold rdtgroup_mutex.
 */
struct mon_data {
	struct list_head	list;
	enum resctrl_res_level	rid;
	struct mon_evt		*evt;
	int			domid;
	bool			sum;
};
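
/*
 * Illustrative sketch: an event file handler recovers its struct mon_data from
 * the kernfs node's priv pointer. The helper name below is hypothetical; the
 * real lookup is done in rdtgroup_mondata_show() with rdtgroup_mutex held via
 * rdtgroup_kn_lock_live().
 *
 *	static struct mon_evt *example_evt_of_kn(struct kernfs_node *kn)
 *	{
 *		struct mon_data *md = kn->priv;
 *
 *		lockdep_assert_held(&rdtgroup_mutex);
 *		return md->evt;
 *	}
 */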

/**
 * struct rmid_read - Data passed across smp_call*() to read event count.
 * @rgrp:  Resource group for which the counter is being read. If it is a parent
 *	   resource group then its event count is summed with the count from all
 *	   its child resource groups.
 * @r:	   Resource describing the properties of the event being read.
 * @hdr:   Header of the domain that the counter should be read from. If NULL
 *	   then sum all domains in @r sharing L3 @ci.id.
 * @evt:   Which monitor event to read.
 * @first: Initialize MBM counter when true.
 * @ci:    Cacheinfo for L3. Only set when @hdr is NULL. Used when summing
 *	   domains.
 * @is_mbm_cntr: true if "mbm_event" counter assignment mode is enabled and it
 *	   is an MBM event.
 * @err:   Error encountered when reading counter.
 * @val:   Returned value of event counter. If @rgrp is a parent resource
 *	   group, @val includes the sum of event counts from its child
 *	   resource groups. If @hdr is NULL, @val includes the sum of all
 *	   domains in @r sharing @ci.id (summed across child resource groups
 *	   if @rgrp is a parent resource group).
 * @arch_mon_ctx: Hardware monitor allocated for this read request (MPAM only).
 */
struct rmid_read {
	struct rdtgroup		*rgrp;
	struct rdt_resource	*r;
	struct rdt_domain_hdr	*hdr;
	struct mon_evt		*evt;
	bool			first;
	struct cacheinfo	*ci;
	bool			is_mbm_cntr;
	int			err;
	u64			val;
	void			*arch_mon_ctx;
};
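
/*
 * Illustrative sketch: how a single domain, non-summing read request might be
 * described. The field values below are hypothetical; in practice
 * mon_event_read() declared later in this header fills in struct rmid_read on
 * behalf of the event file handlers.
 *
 *	struct rmid_read rr = {
 *		.rgrp	= rdtgrp,
 *		.r	= r,
 *		.hdr	= hdr,
 *		.evt	= evt,
 *		.first	= false,
 *	};
 *
 * With @hdr set, @ci is left NULL and no cross-domain summing takes place.
 * @err and @val are filled in by the read side.
 */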

extern struct list_head resctrl_schema_all;

extern bool resctrl_mounted;

enum rdt_group_type {
	RDTCTRL_GROUP = 0,
	RDTMON_GROUP,
	RDT_NUM_GROUP,
};

/**
 * enum rdtgrp_mode - Mode of an RDT resource group
 * @RDT_MODE_SHAREABLE: This resource group allows sharing of its allocations
 * @RDT_MODE_EXCLUSIVE: No sharing of this resource group's allocations allowed
 * @RDT_MODE_PSEUDO_LOCKSETUP: Resource group will be used for Pseudo-Locking
 * @RDT_MODE_PSEUDO_LOCKED: No sharing of this resource group's allocations
 *                          allowed AND the allocations are Cache Pseudo-Locked
 * @RDT_NUM_MODES: Total number of modes
 *
 * The mode of a resource group enables control over the allowed overlap
 * between allocations associated with different resource groups (classes
 * of service). The user can modify the mode of a resource group by
 * writing to the "mode" resctrl file associated with the resource group.
 *
 * The "shareable", "exclusive", and "pseudo-locksetup" modes are set by
 * writing the appropriate text to the "mode" file. A resource group enters
 * "pseudo-locked" mode after the schemata is written while the resource
 * group is in "pseudo-locksetup" mode.
 */
enum rdtgrp_mode {
	RDT_MODE_SHAREABLE = 0,
	RDT_MODE_EXCLUSIVE,
	RDT_MODE_PSEUDO_LOCKSETUP,
	RDT_MODE_PSEUDO_LOCKED,

	/* Must be last */
	RDT_NUM_MODES,
};
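
/*
 * Illustrative sketch: the user-visible names of the modes, as written to the
 * "mode" resctrl file. The lookup table below is only a sketch; resctrl keeps
 * its own mode-to-string mapping internally.
 *
 *	static const char * const example_mode_str[RDT_NUM_MODES] = {
 *		[RDT_MODE_SHAREABLE]		= "shareable",
 *		[RDT_MODE_EXCLUSIVE]		= "exclusive",
 *		[RDT_MODE_PSEUDO_LOCKSETUP]	= "pseudo-locksetup",
 *		[RDT_MODE_PSEUDO_LOCKED]	= "pseudo-locked",
 *	};
 */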

/**
 * struct mongroup - store mon group's data in resctrl fs.
 * @mon_data_kn:		kernfs node for the mon_data directory
 * @parent:			parent rdtgrp
 * @crdtgrp_list:		child rdtgroup node list
 * @rmid:			rmid for this rdtgroup
 */
struct mongroup {
	struct kernfs_node	*mon_data_kn;
	struct rdtgroup		*parent;
	struct list_head	crdtgrp_list;
	u32			rmid;
};

/**
 * struct rdtgroup - store rdtgroup's data in resctrl file system.
 * @kn:				kernfs node
 * @rdtgroup_list:		linked list for all rdtgroups
 * @closid:			closid for this rdtgroup
 * @cpu_mask:			CPUs assigned to this rdtgroup
 * @flags:			status bits
 * @waitcount:			how many CPUs expect to find this
 *				group when they acquire rdtgroup_mutex
 * @type:			indicates the type of this rdtgroup: either
 *				a monitor-only group or a ctrl_mon group
 * @mon:			mongroup related data
 * @mode:			mode of resource group
 * @mba_mbps_event:		input monitoring event id when mba_sc is enabled
 * @plr:			pseudo-locked region
 */
struct rdtgroup {
	struct kernfs_node		*kn;
	struct list_head		rdtgroup_list;
	u32				closid;
	struct cpumask			cpu_mask;
	int				flags;
	atomic_t			waitcount;
	enum rdt_group_type		type;
	struct mongroup			mon;
	enum rdtgrp_mode		mode;
	enum resctrl_event_id		mba_mbps_event;
	struct pseudo_lock_region	*plr;
};

/* rdtgroup.flags */
#define	RDT_DELETED		1

/* rftype.flags */
#define RFTYPE_FLAGS_CPUS_LIST	1

/*
 * Define the file type flags for base and info directories.
 */
#define RFTYPE_INFO			BIT(0)

#define RFTYPE_BASE			BIT(1)

#define RFTYPE_CTRL			BIT(4)

#define RFTYPE_MON			BIT(5)

#define RFTYPE_TOP			BIT(6)

#define RFTYPE_RES_CACHE		BIT(8)

#define RFTYPE_RES_MB			BIT(9)

#define RFTYPE_DEBUG			BIT(10)

#define RFTYPE_ASSIGN_CONFIG		BIT(11)

#define RFTYPE_RES_PERF_PKG		BIT(12)

#define RFTYPE_CTRL_INFO		(RFTYPE_INFO | RFTYPE_CTRL)

#define RFTYPE_MON_INFO			(RFTYPE_INFO | RFTYPE_MON)

#define RFTYPE_TOP_INFO			(RFTYPE_INFO | RFTYPE_TOP)

#define RFTYPE_CTRL_BASE		(RFTYPE_BASE | RFTYPE_CTRL)

#define RFTYPE_MON_BASE			(RFTYPE_BASE | RFTYPE_MON)

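/*
 * Illustrative sketch: a file's fflags are built by combining the flags
 * above. For example, a hypothetical info file that should only appear under
 * info/ for cache resources with allocation (ctrl) support could use:
 *
 *	unsigned long fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE;
 *
 * i.e. RFTYPE_INFO | RFTYPE_CTRL | RFTYPE_RES_CACHE.
 */
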
/* List of all resource groups */
extern struct list_head rdt_all_groups;

extern int max_name_width;

/**
 * struct rftype - describe each file in the resctrl file system
 * @name:	File name
 * @mode:	Access mode
 * @kf_ops:	File operations
 * @flags:	File specific RFTYPE_FLAGS_* flags
 * @fflags:	File specific RFTYPE_* flags
 * @seq_show:	Show content of the file
 * @write:	Write to the file
 */
struct rftype {
	char			*name;
	umode_t			mode;
	const struct kernfs_ops	*kf_ops;
	unsigned long		flags;
	unsigned long		fflags;

	int (*seq_show)(struct kernfs_open_file *of,
			struct seq_file *sf, void *v);
	/*
	 * write() is the generic write callback which maps directly to the
	 * kernfs write operation and overrides all other operations.
	 * Maximum write size is determined by ->max_write_len.
	 */
	ssize_t (*write)(struct kernfs_open_file *of,
			 char *buf, size_t nbytes, loff_t off);
};
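
/*
 * Illustrative sketch: a resctrl file is described by a table entry like the
 * one below. The name, handlers and kf_ops value shown are hypothetical; the
 * real file tables and kernfs_ops live in rdtgroup.c.
 *
 *	static struct rftype example_rft = {
 *		.name		= "example_file",
 *		.mode		= 0644,
 *		.kf_ops		= &example_kf_ops,
 *		.fflags		= RFTYPE_CTRL_BASE,
 *		.seq_show	= example_show,
 *		.write		= example_write,
 *	};
 */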

/**
 * struct mbm_state - status for each MBM counter in each domain
 * @prev_bw_bytes: Previous bytes value read for bandwidth calculation
 * @prev_bw:	The most recent bandwidth in MBps
 */
struct mbm_state {
	u64	prev_bw_bytes;
	u32	prev_bw;
};
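
/*
 * Illustrative sketch of the bandwidth calculation these fields support,
 * assuming the MBM overflow handler samples the byte counter roughly once per
 * second: the delta in bytes since the previous sample, scaled to MB, gives
 * the bandwidth in MBps. The helper name is hypothetical.
 *
 *	static void example_bw_update(struct mbm_state *m, u64 cur_bytes)
 *	{
 *		u64 bytes = cur_bytes - m->prev_bw_bytes;
 *
 *		m->prev_bw_bytes = cur_bytes;
 *		m->prev_bw = bytes / SZ_1M;
 *	}
 */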

extern struct mutex rdtgroup_mutex;

static inline const char *rdt_kn_name(const struct kernfs_node *kn)
{
	return rcu_dereference_check(kn->name, lockdep_is_held(&rdtgroup_mutex));
}

extern struct rdtgroup rdtgroup_default;

extern struct dentry *debugfs_resctrl;

extern enum resctrl_event_id mba_mbps_default_event;

void rdt_last_cmd_clear(void);

void rdt_last_cmd_puts(const char *s);

__printf(1, 2)
void rdt_last_cmd_printf(const char *fmt, ...);

struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn);

void rdtgroup_kn_unlock(struct kernfs_node *kn);
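
/*
 * Illustrative sketch of the usual pattern in the kernfs handlers:
 * rdtgroup_kn_lock_live() takes rdtgroup_mutex and returns the rdtgroup
 * backing @kn, or NULL if the group has been removed, and
 * rdtgroup_kn_unlock() undoes it. The handler name and body are hypothetical.
 *
 *	static ssize_t example_write(struct kernfs_open_file *of, char *buf,
 *				     size_t nbytes, loff_t off)
 *	{
 *		struct rdtgroup *rdtgrp;
 *
 *		rdtgrp = rdtgroup_kn_lock_live(of->kn);
 *		if (!rdtgrp) {
 *			rdtgroup_kn_unlock(of->kn);
 *			return -ENOENT;
 *		}
 *
 *		rdt_last_cmd_clear();
 *
 *		rdtgroup_kn_unlock(of->kn);
 *		return nbytes;
 *	}
 */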

int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name);

int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name,
			     umode_t mask);

ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off);

int rdtgroup_schemata_show(struct kernfs_open_file *of,
			   struct seq_file *s, void *v);

ssize_t rdtgroup_mba_mbps_event_write(struct kernfs_open_file *of,
				      char *buf, size_t nbytes, loff_t off);

int rdtgroup_mba_mbps_event_show(struct kernfs_open_file *of,
				 struct seq_file *s, void *v);

bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_ctrl_domain *d,
			   unsigned long cbm, int closid, bool exclusive);

unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_ctrl_domain *d,
				  unsigned long cbm);

enum rdtgrp_mode rdtgroup_mode_by_closid(int closid);

int rdtgroup_tasks_assigned(struct rdtgroup *r);

int closids_supported(void);

void closid_free(int closid);

int setup_rmid_lru_list(void);

void free_rmid_lru_list(void);

int alloc_rmid(u32 closid);

void free_rmid(u32 closid, u32 rmid);

int resctrl_l3_mon_resource_init(void);

void resctrl_l3_mon_resource_exit(void);

void mon_event_count(void *info);

int rdtgroup_mondata_show(struct seq_file *m, void *arg);

void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
		    struct rdt_domain_hdr *hdr, struct rdtgroup *rdtgrp,
		    cpumask_t *cpumask, struct mon_evt *evt, int first);

void mbm_setup_overflow_handler(struct rdt_l3_mon_domain *dom,
				unsigned long delay_ms,
				int exclude_cpu);

void mbm_handle_overflow(struct work_struct *work);

bool is_mba_sc(struct rdt_resource *r);

void cqm_setup_limbo_handler(struct rdt_l3_mon_domain *dom, unsigned long delay_ms,
			     int exclude_cpu);

void cqm_handle_limbo(struct work_struct *work);

bool has_busy_rmid(struct rdt_l3_mon_domain *d);

void __check_limbo(struct rdt_l3_mon_domain *d, bool force_free);

void resctrl_file_fflags_init(const char *config, unsigned long fflags);

void rdt_staged_configs_clear(void);

bool closid_allocated(unsigned int closid);

bool closid_alloc_fixed(u32 closid);

int resctrl_find_cleanest_closid(void);

void *rdt_kn_parent_priv(struct kernfs_node *kn);

int resctrl_mbm_assign_mode_show(struct kernfs_open_file *of, struct seq_file *s, void *v);

ssize_t resctrl_mbm_assign_mode_write(struct kernfs_open_file *of, char *buf,
				      size_t nbytes, loff_t off);

void resctrl_bmec_files_show(struct rdt_resource *r, struct kernfs_node *l3_mon_kn,
			     bool show);

int resctrl_num_mbm_cntrs_show(struct kernfs_open_file *of, struct seq_file *s, void *v);

int resctrl_available_mbm_cntrs_show(struct kernfs_open_file *of, struct seq_file *s,
				     void *v);

void rdtgroup_assign_cntrs(struct rdtgroup *rdtgrp);

void rdtgroup_unassign_cntrs(struct rdtgroup *rdtgrp);

int event_filter_show(struct kernfs_open_file *of, struct seq_file *seq, void *v);

ssize_t event_filter_write(struct kernfs_open_file *of, char *buf, size_t nbytes,
			   loff_t off);

int resctrl_mbm_assign_on_mkdir_show(struct kernfs_open_file *of,
				     struct seq_file *s, void *v);

ssize_t resctrl_mbm_assign_on_mkdir_write(struct kernfs_open_file *of, char *buf,
					  size_t nbytes, loff_t off);

int mbm_L3_assignments_show(struct kernfs_open_file *of, struct seq_file *s, void *v);

ssize_t mbm_L3_assignments_write(struct kernfs_open_file *of, char *buf, size_t nbytes,
				 loff_t off);

int resctrl_io_alloc_show(struct kernfs_open_file *of, struct seq_file *seq, void *v);

int rdtgroup_init_cat(struct resctrl_schema *s, u32 closid);

enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type);

ssize_t resctrl_io_alloc_write(struct kernfs_open_file *of, char *buf,
			       size_t nbytes, loff_t off);

const char *rdtgroup_name_by_closid(u32 closid);

int resctrl_io_alloc_cbm_show(struct kernfs_open_file *of, struct seq_file *seq,
			      void *v);

ssize_t resctrl_io_alloc_cbm_write(struct kernfs_open_file *of, char *buf,
				   size_t nbytes, loff_t off);

u32 resctrl_io_alloc_closid(struct rdt_resource *r);

#ifdef CONFIG_RESCTRL_FS_PSEUDO_LOCK
int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp);

int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp);

bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_ctrl_domain *d, unsigned long cbm);

bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_ctrl_domain *d);

int rdt_pseudo_lock_init(void);

void rdt_pseudo_lock_release(void);

int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp);

void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp);

#else
static inline int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
{
	return -EOPNOTSUPP;
}

static inline int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
{
	return -EOPNOTSUPP;
}

static inline bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_ctrl_domain *d, unsigned long cbm)
{
	return false;
}

static inline bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_ctrl_domain *d)
{
	return false;
}

static inline int rdt_pseudo_lock_init(void) { return 0; }
static inline void rdt_pseudo_lock_release(void) { }
static inline int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
{
	return -EOPNOTSUPP;
}

static inline void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp) { }
#endif /* CONFIG_RESCTRL_FS_PSEUDO_LOCK */

#endif /* _FS_RESCTRL_INTERNAL_H */