xref: /linux/kernel/trace/rv/monitors/deadline/deadline.h (revision fdbfee9fc56e13a1307868829d438ad66ab308a4)
1*b133207dSGabriele Monaco /* SPDX-License-Identifier: GPL-2.0 */
2*b133207dSGabriele Monaco 
3*b133207dSGabriele Monaco #include <linux/kernel.h>
4*b133207dSGabriele Monaco #include <linux/uaccess.h>
5*b133207dSGabriele Monaco #include <linux/sched/deadline.h>
6*b133207dSGabriele Monaco #include <asm/syscall.h>
7*b133207dSGabriele Monaco #include <uapi/linux/sched/types.h>
8*b133207dSGabriele Monaco #include <trace/events/sched.h>
9*b133207dSGabriele Monaco 
/*
 * Dummy values if the syscall numbers are not available on this architecture.
 *
 * (-__COUNTER__ - 1) is used instead of plain -__COUNTER__: __COUNTER__ may
 * still be 0 at the first expansion, and -0 == 0 is neither negative (which
 * would break the `< 0` checks in should_skip_syscall_handle()) nor distinct
 * from a real syscall number 0 in the switch of extract_params().
 * Each expansion yields a fresh, strictly negative value, so the two dummies
 * can never alias each other or any real syscall id.
 */
#ifndef __NR_sched_setscheduler
#define __NR_sched_setscheduler (-__COUNTER__ - 1)
#endif
#ifndef __NR_sched_setattr
#define __NR_sched_setattr (-__COUNTER__ - 1)
#endif
19*b133207dSGabriele Monaco 
20*b133207dSGabriele Monaco extern struct rv_monitor rv_deadline;
21*b133207dSGabriele Monaco /* Initialised when registering the deadline container */
22*b133207dSGabriele Monaco extern struct sched_class *rv_ext_sched_class;
23*b133207dSGabriele Monaco 
24*b133207dSGabriele Monaco /*
25*b133207dSGabriele Monaco  * If both have dummy values, the syscalls are not supported and we don't even
26*b133207dSGabriele Monaco  * need to register the handler.
27*b133207dSGabriele Monaco  */
28*b133207dSGabriele Monaco static inline bool should_skip_syscall_handle(void)
29*b133207dSGabriele Monaco {
30*b133207dSGabriele Monaco 	return __NR_sched_setattr < 0 && __NR_sched_setscheduler < 0;
31*b133207dSGabriele Monaco }
32*b133207dSGabriele Monaco 
33*b133207dSGabriele Monaco /*
34*b133207dSGabriele Monaco  * is_supported_type - return true if @type is supported by the deadline monitors
35*b133207dSGabriele Monaco  */
36*b133207dSGabriele Monaco static inline bool is_supported_type(u8 type)
37*b133207dSGabriele Monaco {
38*b133207dSGabriele Monaco 	return type == DL_TASK || type == DL_SERVER_FAIR || type == DL_SERVER_EXT;
39*b133207dSGabriele Monaco }
40*b133207dSGabriele Monaco 
41*b133207dSGabriele Monaco /*
42*b133207dSGabriele Monaco  * is_server_type - return true if @type is a supported server
43*b133207dSGabriele Monaco  */
44*b133207dSGabriele Monaco static inline bool is_server_type(u8 type)
45*b133207dSGabriele Monaco {
46*b133207dSGabriele Monaco 	return is_supported_type(type) && type != DL_TASK;
47*b133207dSGabriele Monaco }
48*b133207dSGabriele Monaco 
/*
 * Use negative numbers for the server.
 * Currently only one fair server per CPU, may change in the future.
 *
 * The macro arguments are parenthesized so that an expression argument,
 * e.g. fair_server_id(a + b), negates the whole value rather than
 * expanding to (-a + b).
 */
#define fair_server_id(cpu) (-(cpu))
#define ext_server_id(cpu) (-(cpu) - num_possible_cpus())
#define NO_SERVER_ID (-2 * num_possible_cpus())
56*b133207dSGabriele Monaco /*
57*b133207dSGabriele Monaco  * Get a unique id used for dl entities
58*b133207dSGabriele Monaco  *
59*b133207dSGabriele Monaco  * The cpu is not required for tasks as the pid is used there, if this function
60*b133207dSGabriele Monaco  * is called on a dl_se that for sure corresponds to a task, DL_TASK can be
61*b133207dSGabriele Monaco  * used in place of cpu.
62*b133207dSGabriele Monaco  * We need the cpu for servers as it is provided in the tracepoint and we
63*b133207dSGabriele Monaco  * cannot easily retrieve it from the dl_se (requires the struct rq definition).
64*b133207dSGabriele Monaco  */
65*b133207dSGabriele Monaco static inline int get_entity_id(struct sched_dl_entity *dl_se, int cpu, u8 type)
66*b133207dSGabriele Monaco {
67*b133207dSGabriele Monaco 	if (dl_server(dl_se) && type != DL_TASK) {
68*b133207dSGabriele Monaco 		if (type == DL_SERVER_FAIR)
69*b133207dSGabriele Monaco 			return fair_server_id(cpu);
70*b133207dSGabriele Monaco 		if (type == DL_SERVER_EXT)
71*b133207dSGabriele Monaco 			return ext_server_id(cpu);
72*b133207dSGabriele Monaco 		return NO_SERVER_ID;
73*b133207dSGabriele Monaco 	}
74*b133207dSGabriele Monaco 	return dl_task_of(dl_se)->pid;
75*b133207dSGabriele Monaco }
76*b133207dSGabriele Monaco 
77*b133207dSGabriele Monaco static inline bool task_is_scx_enabled(struct task_struct *tsk)
78*b133207dSGabriele Monaco {
79*b133207dSGabriele Monaco 	return IS_ENABLED(CONFIG_SCHED_CLASS_EXT) &&
80*b133207dSGabriele Monaco 	       tsk->sched_class == rv_ext_sched_class;
81*b133207dSGabriele Monaco }
82*b133207dSGabriele Monaco 
/*
 * Expand id and target as arguments for da functions.
 * Arguments are parenthesized so expression arguments expand safely
 * (these macros intentionally expand to TWO arguments, so the expansion
 * itself cannot be wrapped in parentheses).
 */
#define EXPAND_ID(dl_se, cpu, type) get_entity_id((dl_se), (cpu), (type)), (dl_se)
#define EXPAND_ID_TASK(tsk) get_entity_id(&(tsk)->dl, task_cpu(tsk), DL_TASK), &(tsk)->dl
86*b133207dSGabriele Monaco 
87*b133207dSGabriele Monaco static inline u8 get_server_type(struct task_struct *tsk)
88*b133207dSGabriele Monaco {
89*b133207dSGabriele Monaco 	if (tsk->policy == SCHED_NORMAL || tsk->policy == SCHED_EXT ||
90*b133207dSGabriele Monaco 	    tsk->policy == SCHED_BATCH || tsk->policy == SCHED_IDLE)
91*b133207dSGabriele Monaco 		return task_is_scx_enabled(tsk) ? DL_SERVER_EXT : DL_SERVER_FAIR;
92*b133207dSGabriele Monaco 	return DL_OTHER;
93*b133207dSGabriele Monaco }
94*b133207dSGabriele Monaco 
/*
 * extract_params - extract the target pid and requested policy from a
 * scheduler syscall
 * @regs: registers of the syscall being entered
 * @id: syscall number (__NR_sched_setscheduler or __NR_sched_setattr)
 * @pid_out: out parameter, set to the pid passed to the syscall
 *
 * Returns the requested policy (with SCHED_RESET_ON_FORK stripped) on
 * success, or a negative error: the result of copy_struct_from_user(),
 * or -EINVAL for an unknown syscall or when the policy cannot be
 * determined (SCHED_FLAG_KEEP_POLICY).
 */
static inline int extract_params(struct pt_regs *regs, long id, pid_t *pid_out)
{
	/* Only the fields up to and including sched_flags are needed. */
	size_t size = offsetofend(struct sched_attr, sched_flags);
	struct sched_attr __user *uattr, attr;
	int new_policy = -1, ret;
	unsigned long args[6];

	switch (id) {
	case __NR_sched_setscheduler:
		/* sched_setscheduler(pid, policy, param): policy is arg 1 */
		syscall_get_arguments(current, regs, args);
		*pid_out = args[0];
		new_policy = args[1];
		break;
	case __NR_sched_setattr:
		/* sched_setattr(pid, uattr, flags): policy lives in *uattr */
		syscall_get_arguments(current, regs, args);
		*pid_out = args[0];
		uattr = (struct sched_attr __user *)args[1];
		/*
		 * Just copy up to sched_flags, we are not interested after that
		 */
		ret = copy_struct_from_user(&attr, size, uattr, size);
		if (ret)
			return ret;
		if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY)
			return -EINVAL;
		new_policy = attr.sched_policy;
		break;
	default:
		return -EINVAL;
	}

	/* sched_setscheduler() allows ORing SCHED_RESET_ON_FORK in: mask it */
	return new_policy & ~SCHED_RESET_ON_FORK;
}
128*b133207dSGabriele Monaco 
129*b133207dSGabriele Monaco /* Helper functions requiring DA/HA utilities */
130*b133207dSGabriele Monaco #ifdef RV_MON_TYPE
131*b133207dSGabriele Monaco 
/*
 * get_server - get the server of the given @type associated to a task
 *
 * If the task is a boosted task, the server is available in the task_struct,
 * otherwise grab the dl entity saved for the CPU where the task is enqueued.
 * This function assumes the task is enqueued somewhere.
 * Returns NULL if @type is not a known server type.
 */
static inline struct sched_dl_entity *get_server(struct task_struct *tsk, u8 type)
{
	/* dl_server is only trusted when it matches the requested type. */
	if (tsk->dl_server && get_server_type(tsk) == type)
		return tsk->dl_server;
	if (type == DL_SERVER_FAIR)
		return da_get_target_by_id(fair_server_id(task_cpu(tsk)));
	if (type == DL_SERVER_EXT)
		return da_get_target_by_id(ext_server_id(task_cpu(tsk)));
	return NULL;
}
149*b133207dSGabriele Monaco 
/*
 * Initialise monitors for all tasks and pre-allocate the storage for servers.
 * This is necessary since we don't have access to the servers here and
 * allocation can cause deadlocks from their tracepoints. We can only fill
 * pre-initialised storage from there.
 *
 * @skip_tasks: when true, only the per-CPU server storage is pre-allocated
 * and existing tasks are not walked.
 * Returns 0 on success, -ENOMEM on allocation failure (all storage created
 * so far is torn down via da_monitor_destroy()).
 */
static inline int init_storage(bool skip_tasks)
{
	struct task_struct *g, *p;
	int cpu;

	/* One fair server per possible CPU; ext servers only if SCX is built. */
	for_each_possible_cpu(cpu) {
		if (!da_create_empty_storage(fair_server_id(cpu)))
			goto fail;
		if (IS_ENABLED(CONFIG_SCHED_CLASS_EXT) &&
		    !da_create_empty_storage(ext_server_id(cpu)))
			goto fail;
	}

	if (skip_tasks)
		return 0;

	/* Walk every existing thread; the lock keeps the task list stable. */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		if (p->policy == SCHED_DEADLINE) {
			if (!da_create_storage(EXPAND_ID_TASK(p), NULL)) {
				/* Drop the lock before the teardown path. */
				read_unlock(&tasklist_lock);
				goto fail;
			}
		}
	}
	read_unlock(&tasklist_lock);
	return 0;

fail:
	da_monitor_destroy();
	return -ENOMEM;
}
188*b133207dSGabriele Monaco 
189*b133207dSGabriele Monaco static void __maybe_unused handle_newtask(void *data, struct task_struct *task, u64 flags)
190*b133207dSGabriele Monaco {
191*b133207dSGabriele Monaco 	/* Might be superfluous as tasks are not started with this policy.. */
192*b133207dSGabriele Monaco 	if (task->policy == SCHED_DEADLINE)
193*b133207dSGabriele Monaco 		da_create_storage(EXPAND_ID_TASK(task), NULL);
194*b133207dSGabriele Monaco }
195*b133207dSGabriele Monaco 
196*b133207dSGabriele Monaco static void __maybe_unused handle_exit(void *data, struct task_struct *p, bool group_dead)
197*b133207dSGabriele Monaco {
198*b133207dSGabriele Monaco 	if (p->policy == SCHED_DEADLINE)
199*b133207dSGabriele Monaco 		da_destroy_storage(get_entity_id(&p->dl, DL_TASK, DL_TASK));
200*b133207dSGabriele Monaco }
201*b133207dSGabriele Monaco 
202*b133207dSGabriele Monaco #endif
203