// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Red Hat Inc, Daniel Bristot de Oliveira <bristot@kernel.org>
 */

#define _GNU_SOURCE
#ifdef HAVE_LIBCPUPOWER_SUPPORT
#include <cpuidle.h>
#endif /* HAVE_LIBCPUPOWER_SUPPORT */
#include <dirent.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <limits.h>

#include "utils.h"

#define MAX_MSG_LENGTH	1024
int config_debug;

/*
 * err_msg - print an error message to stderr
 */
void err_msg(const char *fmt, ...)
{
	char message[MAX_MSG_LENGTH];
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(message, sizeof(message), fmt, ap);
	va_end(ap);

	fprintf(stderr, "%s", message);
}

/*
 * debug_msg - print a debug message to stderr if debug is set
 */
void debug_msg(const char *fmt, ...)
{
	char message[MAX_MSG_LENGTH];
	va_list ap;

	if (!config_debug)
		return;

	va_start(ap, fmt);
	vsnprintf(message, sizeof(message), fmt, ap);
	va_end(ap);

	fprintf(stderr, "%s", message);
}

/*
 * fatal - print an error message and EOL to stderr and exit with ERROR
 */
void fatal(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
	fprintf(stderr, "\n");

	exit(ERROR);
}

/*
 * get_llong_from_str - get a long long int from a string
 */
long long get_llong_from_str(char *start)
{
	long long value;
	char *end;

	errno = 0;
	value = strtoll(start, &end, 10);
	if (errno || start == end)
		return -1;

	return value;
}

/*
 * get_duration - fill output with a human readable duration since start_time
 */
void get_duration(time_t start_time, char *output, int output_size)
{
	time_t now = time(NULL);
	struct tm *tm_info;
	time_t duration;

	duration = difftime(now, start_time);
	tm_info = gmtime(&duration);

	snprintf(output, output_size, "%3d %02d:%02d:%02d",
			tm_info->tm_yday,
			tm_info->tm_hour,
			tm_info->tm_min,
			tm_info->tm_sec);
}
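
/*
 * Example (illustrative sketch, not called anywhere in rtla): printing the
 * elapsed time of a run that started at "begin". The buffer size and the
 * printf() call are arbitrary; the buffer only needs to fit the
 * "%3d %02d:%02d:%02d" format used above.
 */
static __attribute__((unused)) void example_print_duration(time_t begin)
{
	char duration[26];

	get_duration(begin, duration, sizeof(duration));
	printf("Duration: %s\n", duration);
}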

/*
 * parse_cpu_set - parse a cpu_list filling cpu_set_t argument
 *
 * Receives a cpu list, like 1-3,5 (cpus 1, 2, 3 and 5), and fills the
 * cpu_set_t argument accordingly.
 *
 * Returns 0 on success, 1 otherwise.
 */
int parse_cpu_set(char *cpu_list, cpu_set_t *set)
{
	const char *p;
	int end_cpu;
	int nr_cpus;
	int cpu;
	int i;

	CPU_ZERO(set);

	nr_cpus = sysconf(_SC_NPROCESSORS_CONF);

	for (p = cpu_list; *p; ) {
		/*
		 * atoi() stops at the first non-digit, so it can parse the
		 * leading cpu of a "1-3" range or a "1,5" list.
		 */
		cpu = atoi(p);
		if (cpu < 0 || (!cpu && *p != '0') || cpu >= nr_cpus)
			goto err;

		while (isdigit(*p))
			p++;
		if (*p == '-') {
			p++;
			end_cpu = atoi(p);
			if (end_cpu < cpu || (!end_cpu && *p != '0') || end_cpu >= nr_cpus)
				goto err;
			while (isdigit(*p))
				p++;
		} else
			end_cpu = cpu;

		if (cpu == end_cpu) {
			debug_msg("cpu_set: adding cpu %d\n", cpu);
			CPU_SET(cpu, set);
		} else {
			for (i = cpu; i <= end_cpu; i++) {
				debug_msg("cpu_set: adding cpu %d\n", i);
				CPU_SET(i, set);
			}
		}

		if (*p == ',')
			p++;
	}

	return 0;
err:
	debug_msg("Error parsing the cpu set %s\n", cpu_list);
	return 1;
}
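
/*
 * Example (illustrative sketch, not used by rtla itself): parsing a cpu
 * list given on the command line and walking the resulting set. The
 * "1-3,5" string and the probe loop are made up for the example.
 */
static __attribute__((unused)) void example_parse_cpu_set(void)
{
	cpu_set_t set;
	int cpu;

	/* fills "set" with cpus 1, 2, 3 and 5; non-zero return means a parse error */
	if (parse_cpu_set("1-3,5", &set))
		return;

	for (cpu = 0; cpu < CPU_SETSIZE; cpu++) {
		if (CPU_ISSET(cpu, &set))
			debug_msg("monitoring cpu %d\n", cpu);
	}
}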

/*
 * parse_seconds_duration - parse duration with s/m/h/d suffix converting it to seconds
 */
long parse_seconds_duration(char *val)
{
	char *end;
	long t;

	t = strtol(val, &end, 10);

	if (end) {
		switch (*end) {
		case 's':
		case 'S':
			break;
		case 'm':
		case 'M':
			t *= 60;
			break;
		case 'h':
		case 'H':
			t *= 60 * 60;
			break;

		case 'd':
		case 'D':
			t *= 24 * 60 * 60;
			break;
		}
	}

	return t;
}

/*
 * parse_ns_duration - parse duration with ns/us/ms/s suffix converting it to nanoseconds
 */
long parse_ns_duration(char *val)
{
	char *end;
	long t;

	t = strtol(val, &end, 10);

	if (end) {
		if (!strncmp(end, "ns", 2)) {
			return t;
		} else if (!strncmp(end, "us", 2)) {
			t *= 1000;
			return t;
		} else if (!strncmp(end, "ms", 2)) {
			t *= 1000 * 1000;
			return t;
		} else if (!strncmp(end, "s", 1)) {
			t *= 1000 * 1000 * 1000;
			return t;
		}
		return -1;
	}

	return t;
}
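
/*
 * Example (illustrative sketch, not called anywhere in rtla): converting
 * user-supplied durations. The "2m" and "250us" literals are arbitrary.
 */
static __attribute__((unused)) void example_parse_durations(void)
{
	/* "2m" -> 120 seconds */
	long timeout = parse_seconds_duration("2m");
	/* "250us" -> 250000 nanoseconds; -1 means an unknown suffix */
	long threshold = parse_ns_duration("250us");

	debug_msg("timeout: %ld s, threshold: %ld ns\n", timeout, threshold);
}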

/*
 * This is a set of helper functions to use SCHED_DEADLINE.
 */
#ifndef __NR_sched_setattr
# ifdef __x86_64__
#  define __NR_sched_setattr	314
# elif __i386__
#  define __NR_sched_setattr	351
# elif __arm__
#  define __NR_sched_setattr	380
# elif __aarch64__ || __riscv
#  define __NR_sched_setattr	274
# elif __powerpc__
#  define __NR_sched_setattr	355
# elif __s390x__
#  define __NR_sched_setattr	345
# elif __loongarch__
#  define __NR_sched_setattr	274
# endif
#endif

#define SCHED_DEADLINE		6

static inline int syscall_sched_setattr(pid_t pid, const struct sched_attr *attr,
				unsigned int flags) {
	return syscall(__NR_sched_setattr, pid, attr, flags);
}

int __set_sched_attr(int pid, struct sched_attr *attr)
{
	int flags = 0;
	int retval;

	retval = syscall_sched_setattr(pid, attr, flags);
	if (retval < 0) {
		err_msg("Failed to set sched attributes to the pid %d: %s\n",
			pid, strerror(errno));
		return 1;
	}

	return 0;
}
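
/*
 * Example (illustrative sketch, not part of the rtla build): giving the
 * calling thread a SCHED_DEADLINE reservation of 100us every 1ms, using
 * rtla's struct sched_attr from utils.h. The runtime/period values are
 * arbitrary; pid 0 means the calling thread.
 */
static __attribute__((unused)) int example_set_deadline_self(void)
{
	struct sched_attr attr = {
		.size		= sizeof(attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_runtime	= 100 * 1000,	/* 100 us, in ns */
		.sched_deadline	= 1000 * 1000,	/* 1 ms, in ns */
		.sched_period	= 1000 * 1000,	/* 1 ms, in ns */
	};

	return __set_sched_attr(0, &attr);
}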

/*
 * procfs_is_workload_pid - check if a procfs entry contains a comm_prefix* comm
 *
 * Check if the procfs entry is a directory of a process, and then check if the
 * process has a comm with the prefix set in char *comm_prefix. As the
 * current users of this function only check for kernel threads, there is no
 * need to check the threads of the process.
 *
 * Return: True if the proc_entry contains a comm file with comm_prefix*.
 * Otherwise returns false.
 */
static int procfs_is_workload_pid(const char *comm_prefix, struct dirent *proc_entry)
{
	char buffer[MAX_PATH];
	int comm_fd, retval;
	char *t_name;

	if (proc_entry->d_type != DT_DIR)
		return 0;

	if (*proc_entry->d_name == '.')
		return 0;

	/* check if the string is a pid */
	for (t_name = proc_entry->d_name; *t_name; t_name++) {
		if (!isdigit(*t_name))
			break;
	}

	if (*t_name != '\0')
		return 0;

	snprintf(buffer, MAX_PATH, "/proc/%s/comm", proc_entry->d_name);
	comm_fd = open(buffer, O_RDONLY);
	if (comm_fd < 0)
		return 0;

	memset(buffer, 0, MAX_PATH);
	retval = read(comm_fd, buffer, MAX_PATH);

	close(comm_fd);

	if (retval <= 0)
		return 0;

	retval = strncmp(comm_prefix, buffer, strlen(comm_prefix));
	if (retval)
		return 0;

	/* comm already has a \n */
	debug_msg("Found workload pid:%s comm:%s", proc_entry->d_name, buffer);

	return 1;
}

/*
 * set_comm_sched_attr - set sched params to threads starting with char *comm_prefix
 *
 * This function uses procfs to list the currently running threads and then sets the
 * sched_attr *attr on the threads that start with char *comm_prefix. It is
 * mainly used to set the priority of the kernel threads created by the
 * tracers.
 */
int set_comm_sched_attr(const char *comm_prefix, struct sched_attr *attr)
{
	struct dirent *proc_entry;
	DIR *procfs;
	int retval;
	int pid;

	if (strlen(comm_prefix) >= MAX_PATH) {
		err_msg("Command prefix is too long: %d < strlen(%s)\n",
			MAX_PATH, comm_prefix);
		return 1;
	}

	procfs = opendir("/proc");
	if (!procfs) {
		err_msg("Could not open procfs\n");
		return 1;
	}

	while ((proc_entry = readdir(procfs))) {

		retval = procfs_is_workload_pid(comm_prefix, proc_entry);
		if (!retval)
			continue;

		if (strtoi(proc_entry->d_name, &pid)) {
			err_msg("'%s' is not a valid pid\n", proc_entry->d_name);
			goto out_err;
		}
		/* procfs_is_workload_pid confirmed it is a pid */
		retval = __set_sched_attr(pid, attr);
		if (retval) {
			err_msg("Error setting sched attributes for pid:%s\n", proc_entry->d_name);
			goto out_err;
		}

		debug_msg("Set sched attributes for pid:%s\n", proc_entry->d_name);
	}
	closedir(procfs);
	return 0;

out_err:
	closedir(procfs);
	return 1;
}

#define INVALID_VAL	(~0L)
static long get_long_ns_after_colon(char *start)
{
	long val = INVALID_VAL;

	/* find the ":" */
	start = strstr(start, ":");
	if (!start)
		return -1;

	/* skip ":" */
	start++;
	val = parse_ns_duration(start);

	return val;
}

static long get_long_after_colon(char *start)
{
	long val = INVALID_VAL;

	/* find the ":" */
	start = strstr(start, ":");
	if (!start)
		return -1;

	/* skip ":" */
	start++;
	val = get_llong_from_str(start);

	return val;
}

/*
 * parse priority in the format:
 * SCHED_OTHER:
 *		o:<prio>
 *		O:<prio>
 * SCHED_RR:
 *		r:<prio>
 *		R:<prio>
 * SCHED_FIFO:
 *		f:<prio>
 *		F:<prio>
 * SCHED_DEADLINE:
 *		d:runtime:period
 *		D:runtime:period
 */
int parse_prio(char *arg, struct sched_attr *sched_param)
{
	long prio;
	long runtime;
	long period;

	memset(sched_param, 0, sizeof(*sched_param));
	sched_param->size = sizeof(*sched_param);

	switch (arg[0]) {
	case 'd':
	case 'D':
		/* d:runtime:period */
		if (strlen(arg) < 4)
			return -1;

		runtime = get_long_ns_after_colon(arg);
		if (runtime == INVALID_VAL)
			return -1;

		period = get_long_ns_after_colon(&arg[2]);
		if (period == INVALID_VAL)
			return -1;

		if (runtime > period)
			return -1;

		sched_param->sched_policy   = SCHED_DEADLINE;
		sched_param->sched_runtime  = runtime;
		sched_param->sched_deadline = period;
		sched_param->sched_period   = period;
		break;
	case 'f':
	case 'F':
		/* f:prio */
		prio = get_long_after_colon(arg);
		if (prio == INVALID_VAL)
			return -1;

		if (prio < sched_get_priority_min(SCHED_FIFO))
			return -1;
		if (prio > sched_get_priority_max(SCHED_FIFO))
			return -1;

		sched_param->sched_policy   = SCHED_FIFO;
		sched_param->sched_priority = prio;
		break;
	case 'r':
	case 'R':
		/* r:prio */
		prio = get_long_after_colon(arg);
		if (prio == INVALID_VAL)
			return -1;

		if (prio < sched_get_priority_min(SCHED_RR))
			return -1;
		if (prio > sched_get_priority_max(SCHED_RR))
			return -1;

		sched_param->sched_policy   = SCHED_RR;
		sched_param->sched_priority = prio;
		break;
	case 'o':
	case 'O':
		/* o:prio */
		prio = get_long_after_colon(arg);
		if (prio == INVALID_VAL)
			return -1;

		if (prio < MIN_NICE)
			return -1;
		if (prio > MAX_NICE)
			return -1;

		sched_param->sched_policy   = SCHED_OTHER;
		sched_param->sched_nice = prio;
		break;
	default:
		return -1;
	}
	return 0;
}
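
/*
 * Example (illustrative sketch, not called by rtla): parsing a priority
 * argument in the format above and applying it to the timerlat kernel
 * threads. The "f:95" string and the "timerlat/" prefix are just examples.
 */
static __attribute__((unused)) int example_apply_priority(void)
{
	struct sched_attr attr;

	/* SCHED_FIFO, priority 95 */
	if (parse_prio("f:95", &attr))
		return -1;

	/* apply it to all threads whose comm starts with "timerlat/" */
	return set_comm_sched_attr("timerlat/", &attr);
}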

/*
 * set_cpu_dma_latency - set the /dev/cpu_dma_latency
 *
 * This is used to reduce the exit from idle latency. The value
 * will be reset once the file descriptor of /dev/cpu_dma_latency
 * is closed.
 *
 * Return: the /dev/cpu_dma_latency file descriptor
 */
int set_cpu_dma_latency(int32_t latency)
{
	int retval;
	int fd;

	fd = open("/dev/cpu_dma_latency", O_RDWR);
	if (fd < 0) {
		err_msg("Error opening /dev/cpu_dma_latency\n");
		return -1;
	}

	retval = write(fd, &latency, 4);
	if (retval < 1) {
		err_msg("Error setting /dev/cpu_dma_latency\n");
		close(fd);
		return -1;
	}

	debug_msg("Set /dev/cpu_dma_latency to %d\n", latency);

	return fd;
}
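
/*
 * Example (illustrative sketch, not called anywhere in rtla): requesting a
 * 0 us exit-from-idle latency for the duration of a measurement. Keeping
 * the returned fd open is what holds the request; the surrounding session
 * logic is hypothetical.
 */
static __attribute__((unused)) void example_cpu_dma_latency_session(void)
{
	int fd;

	fd = set_cpu_dma_latency(0);
	if (fd < 0)
		return;

	/* ... run the measurement while the file descriptor is held open ... */

	/* closing the fd restores the previous setting */
	close(fd);
}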

#ifdef HAVE_LIBCPUPOWER_SUPPORT
static unsigned int **saved_cpu_idle_disable_state;
static size_t saved_cpu_idle_disable_state_alloc_ctr;

/*
 * save_cpu_idle_disable_state - save disable for all idle states of a cpu
 *
 * Saves the current disable state of all idle states of a cpu, to be
 * subsequently restored via restore_cpu_idle_disable_state.
 *
 * Return: idle state count on success, negative on error
 */
int save_cpu_idle_disable_state(unsigned int cpu)
{
	unsigned int nr_states;
	unsigned int state;
	int disabled;
	int nr_cpus;

	nr_states = cpuidle_state_count(cpu);

	if (nr_states == 0)
		return 0;

	if (saved_cpu_idle_disable_state == NULL) {
		nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
		saved_cpu_idle_disable_state = calloc(nr_cpus, sizeof(unsigned int *));
		if (!saved_cpu_idle_disable_state)
			return -1;
	}

	saved_cpu_idle_disable_state[cpu] = calloc(nr_states, sizeof(unsigned int));
	if (!saved_cpu_idle_disable_state[cpu])
		return -1;
	saved_cpu_idle_disable_state_alloc_ctr++;

	for (state = 0; state < nr_states; state++) {
		disabled = cpuidle_is_state_disabled(cpu, state);
		if (disabled < 0)
			return disabled;
		saved_cpu_idle_disable_state[cpu][state] = disabled;
	}

	return nr_states;
}

/*
 * restore_cpu_idle_disable_state - restore disable for all idle states of a cpu
 *
 * Restores the disable state of all idle states of a cpu that was
 * previously saved by save_cpu_idle_disable_state.
 *
 * Return: idle state count on success, negative on error
 */
int restore_cpu_idle_disable_state(unsigned int cpu)
{
	unsigned int nr_states;
	unsigned int state;
	int disabled;
	int result;

	nr_states = cpuidle_state_count(cpu);

	if (nr_states == 0)
		return 0;

	if (!saved_cpu_idle_disable_state)
		return -1;

	for (state = 0; state < nr_states; state++) {
		if (!saved_cpu_idle_disable_state[cpu])
			return -1;
		disabled = saved_cpu_idle_disable_state[cpu][state];
		result = cpuidle_state_disable(cpu, state, disabled);
		if (result < 0)
			return result;
	}

	free(saved_cpu_idle_disable_state[cpu]);
	saved_cpu_idle_disable_state[cpu] = NULL;
	saved_cpu_idle_disable_state_alloc_ctr--;
	if (saved_cpu_idle_disable_state_alloc_ctr == 0) {
		free(saved_cpu_idle_disable_state);
		saved_cpu_idle_disable_state = NULL;
	}

	return nr_states;
}

/*
 * free_cpu_idle_disable_states - free saved idle state disable for all cpus
 *
 * Frees the memory used for storing cpu idle state disable for all cpus
 * and states.
 *
 * Normally, the memory is freed automatically in
 * restore_cpu_idle_disable_state; this is mostly for cleaning up after an
 * error.
 */
void free_cpu_idle_disable_states(void)
{
	int cpu;
	int nr_cpus;

	if (!saved_cpu_idle_disable_state)
		return;

	nr_cpus = sysconf(_SC_NPROCESSORS_CONF);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		free(saved_cpu_idle_disable_state[cpu]);
		saved_cpu_idle_disable_state[cpu] = NULL;
	}

	free(saved_cpu_idle_disable_state);
	saved_cpu_idle_disable_state = NULL;
}

/*
 * set_deepest_cpu_idle_state - limit idle state of cpu
 *
 * Disables all idle states deeper than the one given in
 * deepest_state (assuming states with higher number are deeper).
 *
 * This is used to reduce the exit from idle latency. Unlike
 * set_cpu_dma_latency, it can disable idle states per cpu.
 *
 * Return: idle state count on success, negative on error
 */
int set_deepest_cpu_idle_state(unsigned int cpu, unsigned int deepest_state)
{
	unsigned int nr_states;
	unsigned int state;
	int result;

	nr_states = cpuidle_state_count(cpu);

	for (state = deepest_state + 1; state < nr_states; state++) {
		result = cpuidle_state_disable(cpu, state, 1);
		if (result < 0)
			return result;
	}

	return nr_states;
}
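
/*
 * Example (illustrative sketch, not part of rtla): limiting every monitored
 * cpu to idle state 0 for a measurement and restoring the previous setup
 * afterwards. The cpu loop bound and the error handling are simplified.
 */
static __attribute__((unused)) void example_limit_idle_states(cpu_set_t *monitored_cpus)
{
	int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
	int cpu;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		if (!CPU_ISSET(cpu, monitored_cpus))
			continue;
		/* remember the current per-state disable flags ... */
		if (save_cpu_idle_disable_state(cpu) < 0)
			continue;
		/* ... then allow only the shallowest state */
		set_deepest_cpu_idle_state(cpu, 0);
	}

	/* ... run the measurement ... */

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		if (CPU_ISSET(cpu, monitored_cpus))
			restore_cpu_idle_disable_state(cpu);
	}
}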
#endif /* HAVE_LIBCPUPOWER_SUPPORT */

#define _STR(x) #x
#define STR(x) _STR(x)

/*
 * find_mount - find the mount point of a given fs
 *
 * Returns 0 if the mount is not found, otherwise returns 1 and fills mp
 * with the mount point.
 */
static int find_mount(const char *fs, char *mp, int sizeof_mp)
{
	char mount_point[MAX_PATH+1];
	char type[100];
	int found = 0;
	FILE *fp;

	fp = fopen("/proc/mounts", "r");
	if (!fp)
		return 0;

	while (fscanf(fp, "%*s %" STR(MAX_PATH) "s %99s %*s %*d %*d\n", mount_point, type) == 2) {
		if (strcmp(type, fs) == 0) {
			found = 1;
			break;
		}
	}
	fclose(fp);

	if (!found)
		return 0;

	memset(mp, 0, sizeof_mp);
	strncpy(mp, mount_point, sizeof_mp - 1);

	debug_msg("Fs %s found at %s\n", fs, mp);
	return 1;
}

/*
 * get_self_cgroup - get the current thread cgroup path
 *
 * Parse /proc/$$/cgroup file to get the thread's cgroup. As an example of line to parse:
 *
 * 0::/user.slice/user-0.slice/session-3.scope'\n'
 *
 * This function is interested in the content after the second : and before the '\n'.
 *
 * Returns 1 if a string was found, 0 otherwise.
 */
static int get_self_cgroup(char *self_cg, int sizeof_self_cg)
{
	char path[MAX_PATH], *start;
	int fd, retval;

	snprintf(path, MAX_PATH, "/proc/%d/cgroup", getpid());

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return 0;

	retval = read(fd, path, MAX_PATH);

	close(fd);

	if (retval <= 0)
		return 0;

	start = path;

	start = strstr(start, ":");
	if (!start)
		return 0;

	/* skip ":" */
	start++;

	start = strstr(start, ":");
	if (!start)
		return 0;

	/* skip ":" */
	start++;

	if (strlen(start) >= sizeof_self_cg)
		return 0;

	snprintf(self_cg, sizeof_self_cg, "%s", start);

	/* Swap '\n' with '\0' */
	start = strstr(self_cg, "\n");

	/* there must be '\n' */
	if (!start)
		return 0;

	/* ok, it found a string after the second : and before the \n */
	*start = '\0';

	return 1;
}

/*
 * open_cgroup_procs - Open the cgroup.procs file for the given cgroup
 *
 * If cgroup argument is not NULL, the cgroup.procs file for that cgroup
 * will be opened. Otherwise, the cgroup of the calling (i.e., rtla) thread
 * will be used.
 *
 * Supports cgroup v2.
 *
 * Returns the file descriptor on success, -1 otherwise.
 */
static int open_cgroup_procs(const char *cgroup)
{
	char cgroup_path[MAX_PATH - strlen("/cgroup.procs")];
	char cgroup_procs[MAX_PATH];
	int retval;
	int cg_fd;

	retval = find_mount("cgroup2", cgroup_path, sizeof(cgroup_path));
	if (!retval) {
		err_msg("Did not find cgroupv2 mount point\n");
		return -1;
	}

	if (!cgroup) {
		retval = get_self_cgroup(&cgroup_path[strlen(cgroup_path)],
				sizeof(cgroup_path) - strlen(cgroup_path));
		if (!retval) {
			err_msg("Did not find self cgroup\n");
			return -1;
		}
	} else {
		snprintf(&cgroup_path[strlen(cgroup_path)],
				sizeof(cgroup_path) - strlen(cgroup_path), "%s/", cgroup);
	}

	snprintf(cgroup_procs, MAX_PATH, "%s/cgroup.procs", cgroup_path);

	debug_msg("Using cgroup path at: %s\n", cgroup_procs);

	cg_fd = open(cgroup_procs, O_RDWR);
	if (cg_fd < 0)
		return -1;

	return cg_fd;
}

/*
 * set_pid_cgroup - Set cgroup to pid_t pid
 *
 * If cgroup argument is not NULL, the pid will move to the given cgroup.
 * Otherwise, the cgroup of the calling (i.e., rtla) thread will be used.
 *
 * Supports cgroup v2.
 *
 * Returns 1 on success, 0 otherwise.
 */
int set_pid_cgroup(pid_t pid, const char *cgroup)
{
	char pid_str[24];
	int retval;
	int cg_fd;

	cg_fd = open_cgroup_procs(cgroup);
	if (cg_fd < 0)
		return 0;

	snprintf(pid_str, sizeof(pid_str), "%d\n", pid);

	retval = write(cg_fd, pid_str, strlen(pid_str));
	if (retval < 0)
		err_msg("Error setting cgroup attributes for pid:%s - %s\n",
				pid_str, strerror(errno));
	else
		debug_msg("Set cgroup attributes for pid:%s\n", pid_str);

	close(cg_fd);

	return (retval >= 0);
}
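
/*
 * Example (illustrative sketch, not called anywhere): moving the current
 * process into a pre-existing cgroup before starting a workload. The
 * "my-cgroup" name is hypothetical and must already exist under the
 * cgroup2 mount point.
 */
static __attribute__((unused)) int example_move_self_to_cgroup(void)
{
	/* returns 1 on success, 0 otherwise */
	return set_pid_cgroup(getpid(), "my-cgroup");
}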

/**
 * set_comm_cgroup - Set cgroup to threads starting with char *comm_prefix
 *
 * If cgroup argument is not NULL, the threads will move to the given cgroup.
 * Otherwise, the cgroup of the calling (i.e., rtla) thread will be used.
 *
 * Supports cgroup v2.
 *
 * Returns 1 on success, 0 otherwise.
 */
int set_comm_cgroup(const char *comm_prefix, const char *cgroup)
{
	struct dirent *proc_entry;
	DIR *procfs;
	int retval;
	int cg_fd;

	if (strlen(comm_prefix) >= MAX_PATH) {
		err_msg("Command prefix is too long: %d < strlen(%s)\n",
			MAX_PATH, comm_prefix);
		return 0;
	}

	cg_fd = open_cgroup_procs(cgroup);
	if (cg_fd < 0)
		return 0;

	procfs = opendir("/proc");
	if (!procfs) {
		err_msg("Could not open procfs\n");
		goto out_cg;
	}

	while ((proc_entry = readdir(procfs))) {

		retval = procfs_is_workload_pid(comm_prefix, proc_entry);
		if (!retval)
			continue;

		retval = write(cg_fd, proc_entry->d_name, strlen(proc_entry->d_name));
		if (retval < 0) {
			err_msg("Error setting cgroup attributes for pid:%s - %s\n",
				proc_entry->d_name, strerror(errno));
			goto out_procfs;
		}

		debug_msg("Set cgroup attributes for pid:%s\n", proc_entry->d_name);
	}

	closedir(procfs);
	close(cg_fd);
	return 1;

out_procfs:
	closedir(procfs);
out_cg:
	close(cg_fd);
	return 0;
}

/**
 * auto_house_keeping - Automatically move rtla out of the monitored CPUs
 *
 * Try to move rtla away from the tracer, if possible.
 *
 * Returns 1 on success, 0 otherwise.
 */
int auto_house_keeping(cpu_set_t *monitored_cpus)
{
	cpu_set_t rtla_cpus, house_keeping_cpus;
	int retval;

	/* first get the CPUs in which rtla can actually run. */
	retval = sched_getaffinity(getpid(), sizeof(rtla_cpus), &rtla_cpus);
	if (retval == -1) {
		debug_msg("Could not get rtla affinity, rtla might run with the threads!\n");
		return 0;
	}

	/* then check if the existing setup is already good. */
	CPU_AND(&house_keeping_cpus, &rtla_cpus, monitored_cpus);
	if (!CPU_COUNT(&house_keeping_cpus)) {
		debug_msg("rtla and the monitored CPUs do not share CPUs. ");
		debug_msg("Skipping auto house-keeping\n");
		return 1;
	}

	/* remove the intersection */
	CPU_XOR(&house_keeping_cpus, &rtla_cpus, monitored_cpus);

	/* get only those that rtla can run */
	CPU_AND(&house_keeping_cpus, &house_keeping_cpus, &rtla_cpus);

	/* is there any cpu left? */
	if (!CPU_COUNT(&house_keeping_cpus)) {
		debug_msg("Could not find any CPU for auto house-keeping\n");
		return 0;
	}

	retval = sched_setaffinity(getpid(), sizeof(house_keeping_cpus), &house_keeping_cpus);
	if (retval == -1) {
		debug_msg("Could not set affinity for auto house-keeping\n");
		return 0;
	}

	debug_msg("rtla automatically moved to an auto house-keeping cpu set\n");

	return 1;
}
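
/*
 * Example (illustrative sketch, not part of rtla): combining parse_cpu_set
 * and auto_house_keeping so that the tool itself does not run on the cpus
 * it is measuring. The "1-3" list is arbitrary.
 */
static __attribute__((unused)) void example_house_keeping(void)
{
	cpu_set_t monitored_cpus;

	if (parse_cpu_set("1-3", &monitored_cpus))
		return;

	if (!auto_house_keeping(&monitored_cpus))
		debug_msg("rtla may share a cpu with the monitored workload\n");
}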

/**
 * parse_optional_arg - Parse optional argument value
 *
 * Parse optional argument value, which can be in the form of:
 * -sarg, -s/--long=arg, -s/--long arg
 *
 * Returns arg value if found, NULL otherwise.
 */
char *parse_optional_arg(int argc, char **argv)
{
	if (optarg) {
		if (optarg[0] == '=') {
			/* skip the = */
			return &optarg[1];
		} else {
			return optarg;
		}
	/* parse argument of form -s [arg] and --long [arg] */
	} else if (optind < argc && argv[optind][0] != '-') {
		/* consume optind */
		return argv[optind++];
	} else {
		return NULL;
	}
}
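
/*
 * Example (illustrative sketch, not called anywhere in rtla): handling a
 * hypothetical 'd'/"--duration" option with an optional argument from
 * inside a getopt_long() loop. The option name and the use of
 * parse_seconds_duration for the value are assumptions for the example.
 */
static __attribute__((unused)) long example_duration_opt(int argc, char **argv)
{
	char *arg;

	/* inside the getopt loop, for the 'd' case: */
	arg = parse_optional_arg(argc, argv);
	if (!arg)
		return 0;

	/* accepts -d60s, -d=60s, --duration=60s and "-d 60s" */
	return parse_seconds_duration(arg);
}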

/*
 * strtoi - convert string to integer with error checking
 *
 * Returns 0 on success, -1 if conversion fails or result is out of int range.
 */
int strtoi(const char *s, int *res)
{
	char *end_ptr;
	long lres;

	if (!*s)
		return -1;

	errno = 0;
	lres = strtol(s, &end_ptr, 0);
	if (errno || *end_ptr || lres > INT_MAX || lres < INT_MIN)
		return -1;

	*res = (int) lres;
	return 0;
}