xref: /linux/tools/sched_ext/scx_userland.c (revision 4cf44657887b4c41374981d0afb2ca302b189e15)
1cc4448d0SEmil Tsalapatis /* SPDX-License-Identifier: GPL-2.0 */
2cc4448d0SEmil Tsalapatis /*
3cc4448d0SEmil Tsalapatis  * A demo sched_ext user space scheduler which provides vruntime semantics
4cc4448d0SEmil Tsalapatis  * using a simple ordered-list implementation.
5cc4448d0SEmil Tsalapatis  *
6cc4448d0SEmil Tsalapatis  * Each CPU in the system resides in a single, global domain. This precludes
7cc4448d0SEmil Tsalapatis  * the need to do any load balancing between domains. The scheduler could
8cc4448d0SEmil Tsalapatis  * easily be extended to support multiple domains, with load balancing
9cc4448d0SEmil Tsalapatis  * happening in user space.
10cc4448d0SEmil Tsalapatis  *
11cc4448d0SEmil Tsalapatis  * Any task which has any CPU affinity is scheduled entirely in BPF. This
12cc4448d0SEmil Tsalapatis  * program only schedules tasks which may run on any CPU.
13cc4448d0SEmil Tsalapatis  *
14cc4448d0SEmil Tsalapatis  * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
15cc4448d0SEmil Tsalapatis  * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
16cc4448d0SEmil Tsalapatis  * Copyright (c) 2022 David Vernet <dvernet@meta.com>
17cc4448d0SEmil Tsalapatis  */
18cc4448d0SEmil Tsalapatis #include <stdio.h>
19cc4448d0SEmil Tsalapatis #include <unistd.h>
20cc4448d0SEmil Tsalapatis #include <sched.h>
21cc4448d0SEmil Tsalapatis #include <signal.h>
22cc4448d0SEmil Tsalapatis #include <assert.h>
23cc4448d0SEmil Tsalapatis #include <libgen.h>
24cc4448d0SEmil Tsalapatis #include <pthread.h>
25cc4448d0SEmil Tsalapatis #include <bpf/bpf.h>
26cc4448d0SEmil Tsalapatis #include <sys/mman.h>
27cc4448d0SEmil Tsalapatis #include <sys/queue.h>
28cc4448d0SEmil Tsalapatis #include <sys/syscall.h>
29cc4448d0SEmil Tsalapatis 
30cc4448d0SEmil Tsalapatis #include <scx/common.h>
31cc4448d0SEmil Tsalapatis #include "scx_userland.h"
32cc4448d0SEmil Tsalapatis #include "scx_userland.bpf.skel.h"
33cc4448d0SEmil Tsalapatis 
/*
 * Usage text printed for -h (or an unknown option); the %s is substituted
 * with the program's basename by pre_bootstrap().
 */
const char help_fmt[] =
"A minimal userland sched_ext scheduler.\n"
"\n"
"See the top-level comment in .bpf.c for more details.\n"
"\n"
"Try to reduce `sysctl kernel.pid_max` if this program triggers OOMs.\n"
"\n"
"Usage: %s [-b BATCH]\n"
"\n"
"  -b BATCH      The number of tasks to batch when dispatching (default: 8)\n"
"  -v            Print libbpf debug messages\n"
"  -h            Display this help and exit\n";
46cc4448d0SEmil Tsalapatis 
/* Defined in UAPI */
#define SCHED_EXT 7

/* Number of tasks to batch when dispatching to user space. */
static __u32 batch_size = 8;

/* Set by -v: forward libbpf debug-level messages to stderr. */
static bool verbose;
/* Set to 1 by the SIGINT/SIGTERM handler to request a clean shutdown. */
static volatile int exit_req;
/* fds of the BPF "enqueued" and "dispatched" maps, resolved in bootstrap(). */
static int enqueued_fd, dispatched_fd;

/* Thread running run_stats_printer(); joined in main() during shutdown. */
static pthread_t stats_printer;
static struct scx_userland *skel;
static struct bpf_link *ops_link;

/* Stats collected in user space. */
static __u64 nr_vruntime_enqueues, nr_vruntime_dispatches, nr_vruntime_failed;

/* Number of tasks currently enqueued. */
static __u64 nr_curr_enqueued;

/* The data structure containing tasks that are enqueued in user space. */
struct enqueued_task {
	LIST_ENTRY(enqueued_task) entries;	/* linkage in the vruntime-sorted list */
	__u64 sum_exec_runtime;			/* last total runtime reported by BPF */
	double vruntime;			/* weighted runtime used for ordering */
};

/*
 * Use a vruntime-sorted list to store tasks. This could easily be extended to
 * a more optimal data structure, such as an rbtree as is done in CFS. We
 * currently elect to use a sorted list to simplify the example for
 * illustrative purposes.
 */
LIST_HEAD(listhead, enqueued_task);

/*
 * A vruntime-sorted list of tasks. The head of the list contains the task with
 * the lowest vruntime. That is, the task that has the "highest" claim to be
 * scheduled.
 */
static struct listhead vruntime_head = LIST_HEAD_INITIALIZER(vruntime_head);

/*
 * The main array of tasks. The array is allocated all at once during
 * initialization, based on /proc/sys/kernel/pid_max, to avoid having to
 * dynamically allocate memory on the enqueue path, which could cause a
 * deadlock. A more substantive user space scheduler could e.g. provide a hook
 * for newly enabled tasks that are passed to the scheduler from the
 * .prep_enable() callback to allows the scheduler to allocate on safe paths.
 */
struct enqueued_task *tasks;
/* Snapshot of /proc/sys/kernel/pid_max taken at startup; bounds tasks[]. */
static int pid_max;

/* Vruntime floor: the vruntime of the most recently dispatched list head. */
static double min_vruntime;
101cc4448d0SEmil Tsalapatis 
libbpf_print_fn(enum libbpf_print_level level,const char * format,va_list args)102cc4448d0SEmil Tsalapatis static int libbpf_print_fn(enum libbpf_print_level level, const char *format, va_list args)
103cc4448d0SEmil Tsalapatis {
104cc4448d0SEmil Tsalapatis 	if (level == LIBBPF_DEBUG && !verbose)
105cc4448d0SEmil Tsalapatis 		return 0;
106cc4448d0SEmil Tsalapatis 	return vfprintf(stderr, format, args);
107cc4448d0SEmil Tsalapatis }
108cc4448d0SEmil Tsalapatis 
sigint_handler(int userland)109cc4448d0SEmil Tsalapatis static void sigint_handler(int userland)
110cc4448d0SEmil Tsalapatis {
111cc4448d0SEmil Tsalapatis 	exit_req = 1;
112cc4448d0SEmil Tsalapatis }
113cc4448d0SEmil Tsalapatis 
/*
 * Read the system's maximum pid from /proc/sys/kernel/pid_max.
 *
 * Returns the pid_max value on success, or -1 (with a message on stderr) if
 * the file cannot be opened or parsed.
 */
static int get_pid_max(void)
{
	int val = -1;
	FILE *fp = fopen("/proc/sys/kernel/pid_max", "r");

	if (!fp) {
		fprintf(stderr, "Error opening /proc/sys/kernel/pid_max\n");
		return -1;
	}

	if (fscanf(fp, "%d", &val) != 1) {
		fprintf(stderr, "Error reading from /proc/sys/kernel/pid_max\n");
		val = -1;
	}
	fclose(fp);

	return val;
}
133cc4448d0SEmil Tsalapatis 
init_tasks(void)134cc4448d0SEmil Tsalapatis static int init_tasks(void)
135cc4448d0SEmil Tsalapatis {
136cc4448d0SEmil Tsalapatis 	pid_max = get_pid_max();
137cc4448d0SEmil Tsalapatis 	if (pid_max < 0)
138cc4448d0SEmil Tsalapatis 		return pid_max;
139cc4448d0SEmil Tsalapatis 
140cc4448d0SEmil Tsalapatis 	tasks = calloc(pid_max, sizeof(*tasks));
141cc4448d0SEmil Tsalapatis 	if (!tasks) {
142cc4448d0SEmil Tsalapatis 		fprintf(stderr, "Error allocating tasks array\n");
143cc4448d0SEmil Tsalapatis 		return -ENOMEM;
144cc4448d0SEmil Tsalapatis 	}
145cc4448d0SEmil Tsalapatis 
146cc4448d0SEmil Tsalapatis 	return 0;
147cc4448d0SEmil Tsalapatis }
148cc4448d0SEmil Tsalapatis 
task_pid(const struct enqueued_task * task)149cc4448d0SEmil Tsalapatis static __u32 task_pid(const struct enqueued_task *task)
150cc4448d0SEmil Tsalapatis {
151cc4448d0SEmil Tsalapatis 	return ((uintptr_t)task - (uintptr_t)tasks) / sizeof(*task);
152cc4448d0SEmil Tsalapatis }
153cc4448d0SEmil Tsalapatis 
/*
 * Hand a pid to the BPF side via the "dispatched" map and update the
 * user space dispatch/failure counters. Returns the map update's result
 * (0 on success).
 */
static int dispatch_task(__s32 pid)
{
	int ret = bpf_map_update_elem(dispatched_fd, NULL, &pid, 0);

	if (ret)
		__atomic_add_fetch(&nr_vruntime_failed, 1, __ATOMIC_RELAXED);
	else
		__atomic_add_fetch(&nr_vruntime_dispatches, 1, __ATOMIC_RELAXED);

	return ret;
}
167cc4448d0SEmil Tsalapatis 
/*
 * Map a pid to its preallocated slot in tasks[].
 *
 * Returns NULL for out-of-range pids. The original only rejected
 * pid >= pid_max; a negative pid would have indexed before the start of the
 * array (out-of-bounds access), so reject that too.
 */
static struct enqueued_task *get_enqueued_task(__s32 pid)
{
	if (pid < 0 || pid >= pid_max)
		return NULL;

	return &tasks[pid];
}
175cc4448d0SEmil Tsalapatis 
/*
 * Convert a raw runtime delta into a vruntime delta by scaling it inversely
 * with the task's weight. Weight 100 is neutral: vruntime advances at the
 * same rate as real runtime.
 */
static double calc_vruntime_delta(__u64 weight, __u64 delta)
{
	double scale = (double)weight / 100.0;

	return (double)delta / scale;
}
183cc4448d0SEmil Tsalapatis 
update_enqueued(struct enqueued_task * enqueued,const struct scx_userland_enqueued_task * bpf_task)184cc4448d0SEmil Tsalapatis static void update_enqueued(struct enqueued_task *enqueued, const struct scx_userland_enqueued_task *bpf_task)
185cc4448d0SEmil Tsalapatis {
186cc4448d0SEmil Tsalapatis 	__u64 delta;
187cc4448d0SEmil Tsalapatis 
188cc4448d0SEmil Tsalapatis 	delta = bpf_task->sum_exec_runtime - enqueued->sum_exec_runtime;
189cc4448d0SEmil Tsalapatis 
190cc4448d0SEmil Tsalapatis 	enqueued->vruntime += calc_vruntime_delta(bpf_task->weight, delta);
191cc4448d0SEmil Tsalapatis 	if (min_vruntime > enqueued->vruntime)
192cc4448d0SEmil Tsalapatis 		enqueued->vruntime = min_vruntime;
193cc4448d0SEmil Tsalapatis 	enqueued->sum_exec_runtime = bpf_task->sum_exec_runtime;
194cc4448d0SEmil Tsalapatis }
195cc4448d0SEmil Tsalapatis 
vruntime_enqueue(const struct scx_userland_enqueued_task * bpf_task)196cc4448d0SEmil Tsalapatis static int vruntime_enqueue(const struct scx_userland_enqueued_task *bpf_task)
197cc4448d0SEmil Tsalapatis {
198cc4448d0SEmil Tsalapatis 	struct enqueued_task *curr, *enqueued, *prev;
199cc4448d0SEmil Tsalapatis 
200cc4448d0SEmil Tsalapatis 	curr = get_enqueued_task(bpf_task->pid);
201cc4448d0SEmil Tsalapatis 	if (!curr)
202cc4448d0SEmil Tsalapatis 		return ENOENT;
203cc4448d0SEmil Tsalapatis 
204cc4448d0SEmil Tsalapatis 	update_enqueued(curr, bpf_task);
205*f892f9f9SDavid Carlier 	__atomic_add_fetch(&nr_vruntime_enqueues, 1, __ATOMIC_RELAXED);
206*f892f9f9SDavid Carlier 	__atomic_add_fetch(&nr_curr_enqueued, 1, __ATOMIC_RELAXED);
207cc4448d0SEmil Tsalapatis 
208cc4448d0SEmil Tsalapatis 	/*
209cc4448d0SEmil Tsalapatis 	 * Enqueue the task in a vruntime-sorted list. A more optimal data
210cc4448d0SEmil Tsalapatis 	 * structure such as an rbtree could easily be used as well. We elect
211cc4448d0SEmil Tsalapatis 	 * to use a list here simply because it's less code, and thus the
212cc4448d0SEmil Tsalapatis 	 * example is less convoluted and better serves to illustrate what a
213cc4448d0SEmil Tsalapatis 	 * user space scheduler could look like.
214cc4448d0SEmil Tsalapatis 	 */
215cc4448d0SEmil Tsalapatis 
216cc4448d0SEmil Tsalapatis 	if (LIST_EMPTY(&vruntime_head)) {
217cc4448d0SEmil Tsalapatis 		LIST_INSERT_HEAD(&vruntime_head, curr, entries);
218cc4448d0SEmil Tsalapatis 		return 0;
219cc4448d0SEmil Tsalapatis 	}
220cc4448d0SEmil Tsalapatis 
221cc4448d0SEmil Tsalapatis 	LIST_FOREACH(enqueued, &vruntime_head, entries) {
222cc4448d0SEmil Tsalapatis 		if (curr->vruntime <= enqueued->vruntime) {
223cc4448d0SEmil Tsalapatis 			LIST_INSERT_BEFORE(enqueued, curr, entries);
224cc4448d0SEmil Tsalapatis 			return 0;
225cc4448d0SEmil Tsalapatis 		}
226cc4448d0SEmil Tsalapatis 		prev = enqueued;
227cc4448d0SEmil Tsalapatis 	}
228cc4448d0SEmil Tsalapatis 
229cc4448d0SEmil Tsalapatis 	LIST_INSERT_AFTER(prev, curr, entries);
230cc4448d0SEmil Tsalapatis 
231cc4448d0SEmil Tsalapatis 	return 0;
232cc4448d0SEmil Tsalapatis }
233cc4448d0SEmil Tsalapatis 
drain_enqueued_map(void)234cc4448d0SEmil Tsalapatis static void drain_enqueued_map(void)
235cc4448d0SEmil Tsalapatis {
236cc4448d0SEmil Tsalapatis 	while (1) {
237cc4448d0SEmil Tsalapatis 		struct scx_userland_enqueued_task task;
238cc4448d0SEmil Tsalapatis 		int err;
239cc4448d0SEmil Tsalapatis 
240cc4448d0SEmil Tsalapatis 		if (bpf_map_lookup_and_delete_elem(enqueued_fd, NULL, &task)) {
241cc4448d0SEmil Tsalapatis 			skel->bss->nr_queued = 0;
242cc4448d0SEmil Tsalapatis 			skel->bss->nr_scheduled = nr_curr_enqueued;
243cc4448d0SEmil Tsalapatis 			return;
244cc4448d0SEmil Tsalapatis 		}
245cc4448d0SEmil Tsalapatis 
246cc4448d0SEmil Tsalapatis 		err = vruntime_enqueue(&task);
247cc4448d0SEmil Tsalapatis 		if (err) {
248cc4448d0SEmil Tsalapatis 			fprintf(stderr, "Failed to enqueue task %d: %s\n",
249cc4448d0SEmil Tsalapatis 				task.pid, strerror(err));
250cc4448d0SEmil Tsalapatis 			exit_req = 1;
251cc4448d0SEmil Tsalapatis 			return;
252cc4448d0SEmil Tsalapatis 		}
253cc4448d0SEmil Tsalapatis 	}
254cc4448d0SEmil Tsalapatis }
255cc4448d0SEmil Tsalapatis 
dispatch_batch(void)256cc4448d0SEmil Tsalapatis static void dispatch_batch(void)
257cc4448d0SEmil Tsalapatis {
258cc4448d0SEmil Tsalapatis 	__u32 i;
259cc4448d0SEmil Tsalapatis 
260cc4448d0SEmil Tsalapatis 	for (i = 0; i < batch_size; i++) {
261cc4448d0SEmil Tsalapatis 		struct enqueued_task *task;
262cc4448d0SEmil Tsalapatis 		int err;
263cc4448d0SEmil Tsalapatis 		__s32 pid;
264cc4448d0SEmil Tsalapatis 
265cc4448d0SEmil Tsalapatis 		task = LIST_FIRST(&vruntime_head);
266cc4448d0SEmil Tsalapatis 		if (!task)
267cc4448d0SEmil Tsalapatis 			break;
268cc4448d0SEmil Tsalapatis 
269cc4448d0SEmil Tsalapatis 		min_vruntime = task->vruntime;
270cc4448d0SEmil Tsalapatis 		pid = task_pid(task);
271cc4448d0SEmil Tsalapatis 		LIST_REMOVE(task, entries);
272cc4448d0SEmil Tsalapatis 		err = dispatch_task(pid);
273cc4448d0SEmil Tsalapatis 		if (err) {
274cc4448d0SEmil Tsalapatis 			/*
275cc4448d0SEmil Tsalapatis 			 * If we fail to dispatch, put the task back to the
276cc4448d0SEmil Tsalapatis 			 * vruntime_head list and stop dispatching additional
277cc4448d0SEmil Tsalapatis 			 * tasks in this batch.
278cc4448d0SEmil Tsalapatis 			 */
279cc4448d0SEmil Tsalapatis 			LIST_INSERT_HEAD(&vruntime_head, task, entries);
280cc4448d0SEmil Tsalapatis 			break;
281cc4448d0SEmil Tsalapatis 		}
282*f892f9f9SDavid Carlier 		__atomic_sub_fetch(&nr_curr_enqueued, 1, __ATOMIC_RELAXED);
283cc4448d0SEmil Tsalapatis 	}
284*f892f9f9SDavid Carlier 	skel->bss->nr_scheduled = __atomic_load_n(&nr_curr_enqueued, __ATOMIC_RELAXED);
285cc4448d0SEmil Tsalapatis }
286cc4448d0SEmil Tsalapatis 
run_stats_printer(void * arg)287cc4448d0SEmil Tsalapatis static void *run_stats_printer(void *arg)
288cc4448d0SEmil Tsalapatis {
289cc4448d0SEmil Tsalapatis 	while (!exit_req) {
290cc4448d0SEmil Tsalapatis 		__u64 nr_failed_enqueues, nr_kernel_enqueues, nr_user_enqueues, total;
291cc4448d0SEmil Tsalapatis 
292cc4448d0SEmil Tsalapatis 		nr_failed_enqueues = skel->bss->nr_failed_enqueues;
293cc4448d0SEmil Tsalapatis 		nr_kernel_enqueues = skel->bss->nr_kernel_enqueues;
294cc4448d0SEmil Tsalapatis 		nr_user_enqueues = skel->bss->nr_user_enqueues;
295cc4448d0SEmil Tsalapatis 		total = nr_failed_enqueues + nr_kernel_enqueues + nr_user_enqueues;
296cc4448d0SEmil Tsalapatis 
297cc4448d0SEmil Tsalapatis 		printf("o-----------------------o\n");
298cc4448d0SEmil Tsalapatis 		printf("| BPF ENQUEUES          |\n");
299cc4448d0SEmil Tsalapatis 		printf("|-----------------------|\n");
300cc4448d0SEmil Tsalapatis 		printf("|  kern:     %10llu |\n", nr_kernel_enqueues);
301cc4448d0SEmil Tsalapatis 		printf("|  user:     %10llu |\n", nr_user_enqueues);
302cc4448d0SEmil Tsalapatis 		printf("|  failed:   %10llu |\n", nr_failed_enqueues);
303cc4448d0SEmil Tsalapatis 		printf("|  -------------------- |\n");
304cc4448d0SEmil Tsalapatis 		printf("|  total:    %10llu |\n", total);
305cc4448d0SEmil Tsalapatis 		printf("|                       |\n");
306cc4448d0SEmil Tsalapatis 		printf("|-----------------------|\n");
307cc4448d0SEmil Tsalapatis 		printf("| VRUNTIME / USER       |\n");
308cc4448d0SEmil Tsalapatis 		printf("|-----------------------|\n");
309*f892f9f9SDavid Carlier 		printf("|  enq:      %10llu |\n", __atomic_load_n(&nr_vruntime_enqueues, __ATOMIC_RELAXED));
310*f892f9f9SDavid Carlier 		printf("|  disp:     %10llu |\n", __atomic_load_n(&nr_vruntime_dispatches, __ATOMIC_RELAXED));
311*f892f9f9SDavid Carlier 		printf("|  failed:   %10llu |\n", __atomic_load_n(&nr_vruntime_failed, __ATOMIC_RELAXED));
312cc4448d0SEmil Tsalapatis 		printf("o-----------------------o\n");
313cc4448d0SEmil Tsalapatis 		printf("\n\n");
314cc4448d0SEmil Tsalapatis 		fflush(stdout);
315cc4448d0SEmil Tsalapatis 		sleep(1);
316cc4448d0SEmil Tsalapatis 	}
317cc4448d0SEmil Tsalapatis 
318cc4448d0SEmil Tsalapatis 	return NULL;
319cc4448d0SEmil Tsalapatis }
320cc4448d0SEmil Tsalapatis 
/*
 * Start the once-per-second stats reporter thread; returns pthread_create()'s
 * error code (0 on success). Joined in main() via the stats_printer handle.
 */
static int spawn_stats_thread(void)
{
	return pthread_create(&stats_printer, NULL, run_stats_printer, NULL);
}
325cc4448d0SEmil Tsalapatis 
pre_bootstrap(int argc,char ** argv)326cc4448d0SEmil Tsalapatis static void pre_bootstrap(int argc, char **argv)
327cc4448d0SEmil Tsalapatis {
328cc4448d0SEmil Tsalapatis 	int err;
329cc4448d0SEmil Tsalapatis 	__u32 opt;
330cc4448d0SEmil Tsalapatis 	struct sched_param sched_param = {
331cc4448d0SEmil Tsalapatis 		.sched_priority = sched_get_priority_max(SCHED_EXT),
332cc4448d0SEmil Tsalapatis 	};
333cc4448d0SEmil Tsalapatis 
334cc4448d0SEmil Tsalapatis 	err = init_tasks();
335cc4448d0SEmil Tsalapatis 	if (err)
336cc4448d0SEmil Tsalapatis 		exit(err);
337cc4448d0SEmil Tsalapatis 
338cc4448d0SEmil Tsalapatis 	libbpf_set_print(libbpf_print_fn);
339cc4448d0SEmil Tsalapatis 	signal(SIGINT, sigint_handler);
340cc4448d0SEmil Tsalapatis 	signal(SIGTERM, sigint_handler);
341cc4448d0SEmil Tsalapatis 
342cc4448d0SEmil Tsalapatis 	/*
343cc4448d0SEmil Tsalapatis 	 * Enforce that the user scheduler task is managed by sched_ext. The
344cc4448d0SEmil Tsalapatis 	 * task eagerly drains the list of enqueued tasks in its main work
345cc4448d0SEmil Tsalapatis 	 * loop, and then yields the CPU. The BPF scheduler only schedules the
346cc4448d0SEmil Tsalapatis 	 * user space scheduler task when at least one other task in the system
347cc4448d0SEmil Tsalapatis 	 * needs to be scheduled.
348cc4448d0SEmil Tsalapatis 	 */
349cc4448d0SEmil Tsalapatis 	err = syscall(__NR_sched_setscheduler, getpid(), SCHED_EXT, &sched_param);
350cc4448d0SEmil Tsalapatis 	SCX_BUG_ON(err, "Failed to set scheduler to SCHED_EXT");
351cc4448d0SEmil Tsalapatis 
352cc4448d0SEmil Tsalapatis 	while ((opt = getopt(argc, argv, "b:vh")) != -1) {
353cc4448d0SEmil Tsalapatis 		switch (opt) {
354cc4448d0SEmil Tsalapatis 		case 'b':
355cc4448d0SEmil Tsalapatis 			batch_size = strtoul(optarg, NULL, 0);
356cc4448d0SEmil Tsalapatis 			break;
357cc4448d0SEmil Tsalapatis 		case 'v':
358cc4448d0SEmil Tsalapatis 			verbose = true;
359cc4448d0SEmil Tsalapatis 			break;
360cc4448d0SEmil Tsalapatis 		default:
361cc4448d0SEmil Tsalapatis 			fprintf(stderr, help_fmt, basename(argv[0]));
362cc4448d0SEmil Tsalapatis 			exit(opt != 'h');
363cc4448d0SEmil Tsalapatis 		}
364cc4448d0SEmil Tsalapatis 	}
365cc4448d0SEmil Tsalapatis 
366cc4448d0SEmil Tsalapatis 	/*
367cc4448d0SEmil Tsalapatis 	 * It's not always safe to allocate in a user space scheduler, as an
368cc4448d0SEmil Tsalapatis 	 * enqueued task could hold a lock that we require in order to be able
369cc4448d0SEmil Tsalapatis 	 * to allocate.
370cc4448d0SEmil Tsalapatis 	 */
371cc4448d0SEmil Tsalapatis 	err = mlockall(MCL_CURRENT | MCL_FUTURE);
372cc4448d0SEmil Tsalapatis 	SCX_BUG_ON(err, "Failed to prefault and lock address space");
373cc4448d0SEmil Tsalapatis }
374cc4448d0SEmil Tsalapatis 
/*
 * (Re)initialize all user space scheduler state and bring up the BPF
 * scheduler: reset counters and the task list, open and configure the
 * skeleton, load it, resolve the map fds, start the stats thread, and
 * finally attach the struct_ops. Called again after a hot restart.
 *
 * NOTE(review): the comm parameter is currently unused.
 */
static void bootstrap(char *comm)
{
	/* Reset shutdown flag and all per-run accounting state. */
	exit_req = 0;
	min_vruntime = 0.0;
	__atomic_store_n(&nr_vruntime_enqueues, 0, __ATOMIC_RELAXED);
	__atomic_store_n(&nr_vruntime_dispatches, 0, __ATOMIC_RELAXED);
	__atomic_store_n(&nr_vruntime_failed, 0, __ATOMIC_RELAXED);
	__atomic_store_n(&nr_curr_enqueued, 0, __ATOMIC_RELAXED);
	memset(tasks, 0, pid_max * sizeof(*tasks));
	LIST_INIT(&vruntime_head);

	skel = SCX_OPS_OPEN(userland_ops, scx_userland);

	/* rodata must be set between open and load. */
	skel->rodata->num_possible_cpus = libbpf_num_possible_cpus();
	assert(skel->rodata->num_possible_cpus > 0);
	skel->rodata->usersched_pid = getpid();
	assert(skel->rodata->usersched_pid > 0);

	SCX_OPS_LOAD(skel, userland_ops, scx_userland, uei);

	enqueued_fd = bpf_map__fd(skel->maps.enqueued);
	dispatched_fd = bpf_map__fd(skel->maps.dispatched);
	assert(enqueued_fd > 0);
	assert(dispatched_fd > 0);

	/* Start the reporter before attaching so stats cover the whole run. */
	SCX_BUG_ON(spawn_stats_thread(), "Failed to spawn stats thread");

	ops_link = SCX_OPS_ATTACH(skel, userland_ops, scx_userland);
}
404cc4448d0SEmil Tsalapatis 
sched_main_loop(void)405cc4448d0SEmil Tsalapatis static void sched_main_loop(void)
406cc4448d0SEmil Tsalapatis {
407cc4448d0SEmil Tsalapatis 	while (!exit_req) {
408cc4448d0SEmil Tsalapatis 		/*
409cc4448d0SEmil Tsalapatis 		 * Perform the following work in the main user space scheduler
410cc4448d0SEmil Tsalapatis 		 * loop:
411cc4448d0SEmil Tsalapatis 		 *
412cc4448d0SEmil Tsalapatis 		 * 1. Drain all tasks from the enqueued map, and enqueue them
413cc4448d0SEmil Tsalapatis 		 *    to the vruntime sorted list.
414cc4448d0SEmil Tsalapatis 		 *
415cc4448d0SEmil Tsalapatis 		 * 2. Dispatch a batch of tasks from the vruntime sorted list
416cc4448d0SEmil Tsalapatis 		 *    down to the kernel.
417cc4448d0SEmil Tsalapatis 		 *
418cc4448d0SEmil Tsalapatis 		 * 3. Yield the CPU back to the system. The BPF scheduler will
419cc4448d0SEmil Tsalapatis 		 *    reschedule the user space scheduler once another task has
420cc4448d0SEmil Tsalapatis 		 *    been enqueued to user space.
421cc4448d0SEmil Tsalapatis 		 */
422cc4448d0SEmil Tsalapatis 		drain_enqueued_map();
423cc4448d0SEmil Tsalapatis 		dispatch_batch();
424cc4448d0SEmil Tsalapatis 		sched_yield();
425cc4448d0SEmil Tsalapatis 	}
426cc4448d0SEmil Tsalapatis }
427cc4448d0SEmil Tsalapatis 
main(int argc,char ** argv)428cc4448d0SEmil Tsalapatis int main(int argc, char **argv)
429cc4448d0SEmil Tsalapatis {
430cc4448d0SEmil Tsalapatis 	__u64 ecode;
431cc4448d0SEmil Tsalapatis 
432cc4448d0SEmil Tsalapatis 	pre_bootstrap(argc, argv);
433cc4448d0SEmil Tsalapatis restart:
434cc4448d0SEmil Tsalapatis 	bootstrap(argv[0]);
435cc4448d0SEmil Tsalapatis 	sched_main_loop();
436cc4448d0SEmil Tsalapatis 
437cc4448d0SEmil Tsalapatis 	exit_req = 1;
438cc4448d0SEmil Tsalapatis 	bpf_link__destroy(ops_link);
439048714d9SDavid Carlier 	pthread_join(stats_printer, NULL);
440cc4448d0SEmil Tsalapatis 	ecode = UEI_REPORT(skel, uei);
441cc4448d0SEmil Tsalapatis 	scx_userland__destroy(skel);
442cc4448d0SEmil Tsalapatis 
443cc4448d0SEmil Tsalapatis 	if (UEI_ECODE_RESTART(ecode))
444cc4448d0SEmil Tsalapatis 		goto restart;
445cc4448d0SEmil Tsalapatis 	return 0;
446cc4448d0SEmil Tsalapatis }
447