/* SPDX-License-Identifier: GPL-2.0 */
#ifndef RESCTRL_H
#define RESCTRL_H
#include <stdio.h>
#include <math.h>
#include <errno.h>
#include <sched.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <signal.h>
#include <dirent.h>
#include <stdbool.h>
#include <ctype.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/select.h>
#include <sys/time.h>
#include <sys/eventfd.h>
#include <asm/unistd.h>
#include <linux/perf_event.h>
#include <linux/compiler.h>
#include <linux/bits.h>
#include "kselftest.h"

#define MB			(1024 * 1024)
#define RESCTRL_PATH		"/sys/fs/resctrl"
#define PHYS_ID_PATH		"/sys/devices/system/cpu/cpu"
#define INFO_PATH		"/sys/fs/resctrl/info"

/*
 * CPU vendor IDs
 *
 * Define as bits because they're used for vendor_specific bitmask in
 * the struct resctrl_test.
 */
#define ARCH_INTEL	BIT(0)
#define ARCH_AMD	BIT(1)
#define ARCH_HYGON	BIT(2)

#define END_OF_TESTS	1

#define BENCHMARK_ARGS		64

#define MINIMUM_SPAN		(250 * MB)
/*
 * Memory bandwidth (in MiB) below which the bandwidth comparisons
 * between iMC and resctrl are considered unreliable. For example, RAS
 * features or memory performance features that generate memory traffic
 * may drive accesses that are counted differently by the performance
 * counters and by MBM, for instance generating "overhead" traffic which
 * is not counted against any specific RMID.
 */
#define THROTTLE_THRESHOLD	750

/*
 * fill_buf_param:	"fill_buf" benchmark parameters
 * @buf_size:		Size (in bytes) of the buffer used in the benchmark.
 *			"fill_buf" allocates and initializes a buffer of
 *			@buf_size bytes. The user can change the value via
 *			the command line.
 * @memflush:		If false, the buffer will not be flushed after
 *			allocation and initialization; otherwise it will be
 *			flushed. The user can change the value via the
 *			command line (as an integer, with 0 interpreted as
 *			false and anything else as true).
 */
struct fill_buf_param {
	size_t		buf_size;
	bool		memflush;
};
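
/*
 * Example (illustrative only, not part of the selftests): a 256 MiB
 * buffer that is flushed after allocation and initialization could be
 * described as:
 *
 *	struct fill_buf_param fill_buf = {
 *		.buf_size	= 256 * MB,
 *		.memflush	= true,
 *	};
 */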

/*
 * user_params:		User supplied parameters
 * @cpu:		CPU number to which the benchmark will be bound
 * @bits:		Number of bits used for cache allocation size
 * @benchmark_cmd:	Benchmark command to run during (some of the) tests
 * @fill_buf:		Pointer to user-provided parameters for "fill_buf";
 *			NULL if the user did not provide parameters and
 *			test-specific defaults should be used.
 */
struct user_params {
	int cpu;
	int bits;
	const char *benchmark_cmd[BENCHMARK_ARGS];
	const struct fill_buf_param *fill_buf;
};

/*
 * resctrl_test:	resctrl test definition
 * @name:		Test name
 * @group:		Test group - a common name for tests that share some characteristic
 *			(e.g., L3 CAT test belongs to the CAT group). Can be NULL
 * @resource:		Resource to test (e.g., MB, L3, L2, etc.)
 * @vendor_specific:	Bitmask for vendor-specific tests (can be 0 for universal tests)
 * @disabled:		Test is disabled
 * @feature_check:	Callback to check required resctrl features
 * @run_test:		Callback to run the test
 * @cleanup:		Callback to cleanup after the test
 */
struct resctrl_test {
	const char	*name;
	const char	*group;
	const char	*resource;
	unsigned int	vendor_specific;
	bool		disabled;
	bool		(*feature_check)(const struct resctrl_test *test);
	int		(*run_test)(const struct resctrl_test *test,
				    const struct user_params *uparams);
	void		(*cleanup)(void);
};
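
/*
 * Example (hypothetical, for illustration only): a test definition could
 * look like the following; the real test definitions are declared at the
 * end of this header and live in their respective *_test.c files.
 * "example_run_test" and "example_cleanup" do not exist in the selftests.
 *
 *	struct resctrl_test example_test = {
 *		.name		= "EXAMPLE",
 *		.resource	= "L3",
 *		.vendor_specific = 0,
 *		.feature_check	= test_resource_feature_check,
 *		.run_test	= example_run_test,
 *		.cleanup	= example_cleanup,
 *	};
 */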

/*
 * resctrl_val_param:	resctrl test parameters
 * @ctrlgrp:		Name of the control monitor group (con_mon grp)
 * @mongrp:		Name of the monitor group (mon grp)
 * @filename:		Name of the file to which the output should be written
 * @mask:		Cache allocation bitmask used by cache tests
 * @num_of_runs:	Number of measurement runs completed so far
 * @init:		Callback function to initialize test environment
 * @setup:		Callback function to setup per test run environment
 * @measure:		Callback that performs the measurement (a single test)
 * @fill_buf:		Parameters for the default "fill_buf" benchmark.
 *			Initialized from user-provided parameters, possibly
 *			adapted to be relevant to the test. If the user
 *			provides neither "fill_buf" parameters nor a
 *			replacement benchmark, this is initialized with
 *			defaults appropriate for the test. NULL if the user
 *			provided a benchmark.
 */
struct resctrl_val_param {
	const char		*ctrlgrp;
	const char		*mongrp;
	char			filename[64];
	unsigned long		mask;
	int			num_of_runs;
	int			(*init)(const struct resctrl_val_param *param,
					int domain_id);
	int			(*setup)(const struct resctrl_test *test,
					 const struct user_params *uparams,
					 struct resctrl_val_param *param);
	int			(*measure)(const struct user_params *uparams,
					   struct resctrl_val_param *param,
					   pid_t bm_pid);
	struct fill_buf_param	*fill_buf;
};
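
/*
 * Example (hypothetical, for illustration only): a caller of resctrl_val()
 * might fill in the structure roughly like this; "example_init" and
 * "example_setup" do not exist in the selftests, and the group names and
 * filename are placeholders. measure_read_mem_bw() is declared below.
 *
 *	struct resctrl_val_param param = {
 *		.ctrlgrp	= "c1",
 *		.mongrp		= "m1",
 *		.filename	= "/tmp/example_result",
 *		.init		= example_init,
 *		.setup		= example_setup,
 *		.measure	= measure_read_mem_bw,
 *	};
 */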
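/*
 * Layout of the data read back from a perf event group. This appears to
 * match the perf_event_open(2) read format when PERF_FORMAT_GROUP is set
 * and no other read-format flags are enabled: a count followed by one
 * value per event, with room for two events here.
 */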
struct perf_event_read {
	__u64 nr;			/* The number of events */
	struct {
		__u64 value;		/* The value of the event */
	} values[2];
};

/*
 * Memory location that consumes values the compiler must not optimize
 * away. Volatile ensures writes to this location cannot be optimized
 * away by the compiler.
 */
extern volatile int *value_sink;

extern int snc_unreliable;

extern char llc_occup_path[1024];

int snc_nodes_per_l3_cache(void);
unsigned int get_vendor(void);
bool check_resctrlfs_support(void);
int filter_dmesg(void);
int get_domain_id(const char *resource, int cpu_no, int *domain_id);
int mount_resctrlfs(void);
int umount_resctrlfs(void);
bool resctrl_resource_exists(const char *resource);
bool resctrl_mon_feature_exists(const char *resource, const char *feature);
bool resource_info_file_exists(const char *resource, const char *file);
bool test_resource_feature_check(const struct resctrl_test *test);
char *fgrep(FILE *inf, const char *str);
int taskset_benchmark(pid_t bm_pid, int cpu_no, cpu_set_t *old_affinity);
int taskset_restore(pid_t bm_pid, cpu_set_t *old_affinity);
int write_schemata(const char *ctrlgrp, char *schemata, int cpu_no,
		   const char *resource);
int write_bm_pid_to_resctrl(pid_t bm_pid, const char *ctrlgrp, const char *mongrp);
int perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu,
		    int group_fd, unsigned long flags);
unsigned char *alloc_buffer(size_t buf_size, bool memflush);
void mem_flush(unsigned char *buf, size_t buf_size);
void fill_cache_read(unsigned char *buf, size_t buf_size, bool once);
ssize_t get_fill_buf_size(int cpu_no, const char *cache_type);
int initialize_read_mem_bw_imc(void);
int measure_read_mem_bw(const struct user_params *uparams,
			struct resctrl_val_param *param, pid_t bm_pid);
void initialize_mem_bw_resctrl(const struct resctrl_val_param *param,
			       int domain_id);
int resctrl_val(const struct resctrl_test *test,
		const struct user_params *uparams,
		struct resctrl_val_param *param);
unsigned long create_bit_mask(unsigned int start, unsigned int len);
unsigned int count_contiguous_bits(unsigned long val, unsigned int *start);
int get_full_cbm(const char *cache_type, unsigned long *mask);
int get_mask_no_shareable(const char *cache_type, unsigned long *mask);
int get_cache_size(int cpu_no, const char *cache_type, unsigned long *cache_size);
int resource_info_unsigned_get(const char *resource, const char *filename, unsigned int *val);
void ctrlc_handler(int signum, siginfo_t *info, void *ptr);
int signal_handler_register(const struct resctrl_test *test);
void signal_handler_unregister(void);
unsigned int count_bits(unsigned long n);
int snc_kernel_support(void);

void perf_event_attr_initialize(struct perf_event_attr *pea, __u64 config);
void perf_event_initialize_read_format(struct perf_event_read *pe_read);
int perf_open(struct perf_event_attr *pea, pid_t pid, int cpu_no);
int perf_event_reset_enable(int pe_fd);
int perf_event_measure(int pe_fd, struct perf_event_read *pe_read,
		       const char *filename, pid_t bm_pid);
int measure_llc_resctrl(const char *filename, pid_t bm_pid);
void show_cache_info(int no_of_bits, __u64 avg_llc_val, size_t cache_span, bool lines);

/*
 * cache_portion_size - Calculate the size of a cache portion
 * @cache_size:		Total cache size in bytes
 * @portion_mask:	Cache portion mask
 * @full_cache_mask:	Full Cache Bit Mask (CBM) for the cache
 *
 * Return: The size of the cache portion in bytes.
 */
static inline unsigned long cache_portion_size(unsigned long cache_size,
					       unsigned long portion_mask,
					       unsigned long full_cache_mask)
{
	unsigned int bits = count_bits(full_cache_mask);

	/*
	 * With no bits in the full CBM, assume the cache cannot be split
	 * into smaller portions. To avoid dividing by zero, return
	 * cache_size.
	 */
	if (!bits)
		return cache_size;

	return cache_size * count_bits(portion_mask) / bits;
}
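
/*
 * Worked example (illustrative): with an 8 MiB cache, a full CBM of 0xfff
 * (12 bits) and a portion mask of 0x3 (2 bits), cache_portion_size()
 * returns 8 * MB * 2 / 12, i.e. roughly 1.33 MiB.
 */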

extern struct resctrl_test mbm_test;
extern struct resctrl_test mba_test;
extern struct resctrl_test cmt_test;
extern struct resctrl_test l3_cat_test;
extern struct resctrl_test l3_noncont_cat_test;
extern struct resctrl_test l2_noncont_cat_test;

#endif /* RESCTRL_H */