xref: /linux/tools/testing/selftests/resctrl/resctrl.h (revision 2851f57d2dabd76a79365b78fedc80d2ed3ac2d8)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef RESCTRL_H
3 #define RESCTRL_H
4 #include <stdio.h>
5 #include <math.h>
6 #include <errno.h>
7 #include <sched.h>
8 #include <stdlib.h>
9 #include <unistd.h>
10 #include <string.h>
11 #include <signal.h>
12 #include <dirent.h>
13 #include <stdbool.h>
14 #include <sys/stat.h>
15 #include <sys/ioctl.h>
16 #include <sys/mount.h>
17 #include <sys/types.h>
18 #include <sys/wait.h>
19 #include <sys/select.h>
20 #include <sys/time.h>
21 #include <sys/eventfd.h>
22 #include <asm/unistd.h>
23 #include <linux/perf_event.h>
24 #include "../kselftest.h"
25 
26 #define MB			(1024 * 1024)
27 #define RESCTRL_PATH		"/sys/fs/resctrl"
28 #define PHYS_ID_PATH		"/sys/devices/system/cpu/cpu"
29 #define INFO_PATH		"/sys/fs/resctrl/info"
30 
31 /*
32  * CPU vendor IDs
33  *
34  * Define as bits because they're used for vendor_specific bitmask in
35  * the struct resctrl_test.
36  */
37 #define ARCH_INTEL     1
38 #define ARCH_AMD       2
39 
40 #define END_OF_TESTS	1
41 
42 #define BENCHMARK_ARGS		64
43 
44 #define DEFAULT_SPAN		(250 * MB)
45 
/*
 * PARENT_EXIT() - Fatal-error bailout used from a forked child process.
 *
 * Kills the parent (via the global @ppid declared later in this header),
 * unmounts the resctrl filesystem, and terminates the calling process
 * with EXIT_FAILURE. Does not return.
 */
#define PARENT_EXIT()				\
	do {					\
		kill(ppid, SIGKILL);		\
		umount_resctrlfs();		\
		exit(EXIT_FAILURE);		\
	} while (0)
52 
/*
 * user_params:		User supplied parameters
 * @cpu:		CPU number to which the benchmark will be bound
 * @bits:		Number of bits used for cache allocation size
 * @benchmark_cmd:	Benchmark command to run during (some of the) tests;
 *			argv-style array holding at most BENCHMARK_ARGS entries
 */
struct user_params {
	int cpu;
	int bits;
	const char *benchmark_cmd[BENCHMARK_ARGS];
};
64 
/*
 * resctrl_test:	resctrl test definition
 * @name:		Test name
 * @resource:		Resource to test (e.g., MB, L3, L2, etc.)
 * @vendor_specific:	Bitmask of ARCH_* vendor IDs the test is restricted to
 *			(0 for universal tests)
 * @disabled:		True if the test is disabled (e.g., by a command line option)
 * @feature_check:	Callback to check required resctrl features
 * @run_test:		Callback to run the test; returns 0 on success, or a
 *			non-zero status otherwise
 */
struct resctrl_test {
	const char	*name;
	const char	*resource;
	unsigned int	vendor_specific;
	bool		disabled;
	bool		(*feature_check)(const struct resctrl_test *test);
	int		(*run_test)(const struct resctrl_test *test,
				    const struct user_params *uparams);
};
83 
/*
 * resctrl_val_param:	resctrl test parameters
 * @resctrl_val:	Resctrl feature (Eg: mbm, mba.. etc)
 * @ctrlgrp:		Name of the control monitor group (con_mon grp)
 * @mongrp:		Name of the monitor group (mon grp)
 * @filename:		Name of file to which the o/p should be written
 * @bw_report:		Bandwidth report type (reads vs writes)
 * @mask:		Bit mask used by the test (presumably the cache
 *			allocation CBM — verify against callers)
 * @num_of_runs:	Run counter (appears to track how many times the
 *			test/benchmark has run — verify against callers)
 * @setup:		Call back function to setup test environment
 */
struct resctrl_val_param {
	char		*resctrl_val;
	char		ctrlgrp[64];
	char		mongrp[64];
	char		filename[64];
	char		*bw_report;
	unsigned long	mask;
	int		num_of_runs;
	int		(*setup)(const struct resctrl_test *test,
				 const struct user_params *uparams,
				 struct resctrl_val_param *param);
};
105 
/*
 * perf_event_read - Buffer layout for reading perf event counter values.
 *
 * NOTE(review): the nr + values[] shape matches the perf_event_open(2)
 * read format when PERF_FORMAT_GROUP is set; the read() site is not in
 * this header, so confirm against the perf setup code. Sized for at most
 * two grouped events.
 */
struct perf_event_read {
	__u64 nr;			/* The number of events */
	struct {
		__u64 value;		/* The value of the event */
	} values[2];
};
112 
113 #define MBM_STR			"mbm"
114 #define MBA_STR			"mba"
115 #define CMT_STR			"cmt"
116 #define CAT_STR			"cat"
117 
118 /*
119  * Memory location that consumes values compiler must not optimize away.
120  * Volatile ensures writes to this location cannot be optimized away by
121  * compiler.
122  */
123 extern volatile int *value_sink;
124 
125 extern pid_t bm_pid, ppid;
126 
127 extern char llc_occup_path[1024];
128 
129 int get_vendor(void);
130 bool check_resctrlfs_support(void);
131 int filter_dmesg(void);
132 int get_domain_id(const char *resource, int cpu_no, int *domain_id);
133 int mount_resctrlfs(void);
134 int umount_resctrlfs(void);
135 int validate_bw_report_request(char *bw_report);
136 bool validate_resctrl_feature_request(const char *resource, const char *feature);
137 bool test_resource_feature_check(const struct resctrl_test *test);
138 char *fgrep(FILE *inf, const char *str);
139 int taskset_benchmark(pid_t bm_pid, int cpu_no, cpu_set_t *old_affinity);
140 int taskset_restore(pid_t bm_pid, cpu_set_t *old_affinity);
141 int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, const char *resource);
142 int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
143 			    char *resctrl_val);
144 int perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu,
145 		    int group_fd, unsigned long flags);
146 unsigned char *alloc_buffer(size_t buf_size, int memflush);
147 void mem_flush(unsigned char *buf, size_t buf_size);
148 void fill_cache_read(unsigned char *buf, size_t buf_size, bool once);
149 int run_fill_buf(size_t buf_size, int memflush, int op, bool once);
150 int resctrl_val(const struct resctrl_test *test,
151 		const struct user_params *uparams,
152 		const char * const *benchmark_cmd,
153 		struct resctrl_val_param *param);
154 void tests_cleanup(void);
155 void mbm_test_cleanup(void);
156 void mba_test_cleanup(void);
157 unsigned long create_bit_mask(unsigned int start, unsigned int len);
158 unsigned int count_contiguous_bits(unsigned long val, unsigned int *start);
159 int get_full_cbm(const char *cache_type, unsigned long *mask);
160 int get_mask_no_shareable(const char *cache_type, unsigned long *mask);
161 int get_cache_size(int cpu_no, const char *cache_type, unsigned long *cache_size);
162 void ctrlc_handler(int signum, siginfo_t *info, void *ptr);
163 int signal_handler_register(void);
164 void signal_handler_unregister(void);
165 void cat_test_cleanup(void);
166 unsigned int count_bits(unsigned long n);
167 void cmt_test_cleanup(void);
168 
169 void perf_event_attr_initialize(struct perf_event_attr *pea, __u64 config);
170 void perf_event_initialize_read_format(struct perf_event_read *pe_read);
171 int perf_open(struct perf_event_attr *pea, pid_t pid, int cpu_no);
172 int perf_event_reset_enable(int pe_fd);
173 int perf_event_measure(int pe_fd, struct perf_event_read *pe_read,
174 		       const char *filename, int bm_pid);
175 int measure_llc_resctrl(const char *filename, int bm_pid);
176 void show_cache_info(int no_of_bits, __u64 avg_llc_val, size_t cache_span, bool lines);
177 
178 /*
179  * cache_portion_size - Calculate the size of a cache portion
180  * @cache_size:		Total cache size in bytes
181  * @portion_mask:	Cache portion mask
182  * @full_cache_mask:	Full Cache Bit Mask (CBM) for the cache
183  *
184  * Return: The size of the cache portion in bytes.
185  */
static inline unsigned long cache_portion_size(unsigned long cache_size,
					       unsigned long portion_mask,
					       unsigned long full_cache_mask)
{
	unsigned int cbm_bits = count_bits(full_cache_mask);

	/*
	 * With no bits set in the full CBM, assume the cache cannot be
	 * split into smaller portions. Return the full cache_size to
	 * avoid a divide by zero.
	 */
	return cbm_bits ? cache_size * count_bits(portion_mask) / cbm_bits
			: cache_size;
}
201 
202 extern struct resctrl_test mbm_test;
203 extern struct resctrl_test mba_test;
204 extern struct resctrl_test cmt_test;
205 extern struct resctrl_test l3_cat_test;
206 
207 #endif /* RESCTRL_H */
208