xref: /linux/drivers/hid/bpf/progs/hid_bpf_async.h (revision 53597deca0e38c30e6cd4ba2114fa42d2bcd85bb)
1 /* SPDX-License-Identifier: GPL-2.0-only
2  * Copyright (c) 2024 Benjamin Tissoires
3  */
4 
#ifndef __HID_BPF_ASYNC_H__
#define __HID_BPF_ASYNC_H__

#ifndef HID_BPF_ASYNC_MAX_CTX
#error "HID_BPF_ASYNC_MAX_CTX should be set to the maximum number of concurrent async functions"
#endif /* HID_BPF_ASYNC_MAX_CTX */

/* BPF programs don't pull in the uapi headers, so define the clockid here. */
#define CLOCK_MONOTONIC		1

/* Signature of a bpf_wq/bpf_timer callback: (map, key, value) of the elem. */
typedef int (*hid_bpf_async_callback_t)(void *map, int *key, void *value);
15 
/*
 * Lifecycle of one async context slot:
 *
 *   UNSET        -> INITIALIZING  slot claimed by hid_bpf_async_find_empty_key()
 *   INITIALIZING -> INITIALIZED   timer + wq set up by hid_bpf_async_get_ctx()
 *   INITIALIZED  -> STARTING      a delayed call has been queued
 *   STARTING     -> RUNNING       the wq callback is executing the user body
 *   RUNNING      -> INITIALIZED   the wq callback returned; slot reusable
 */
enum hid_bpf_async_state {
	HID_BPF_ASYNC_STATE_UNSET = 0,
	HID_BPF_ASYNC_STATE_INITIALIZING,
	HID_BPF_ASYNC_STATE_INITIALIZED,
	HID_BPF_ASYNC_STATE_STARTING,
	HID_BPF_ASYNC_STATE_RUNNING,
};
23 
/* One async context slot, stored in hid_bpf_async_ctx_map. */
struct hid_bpf_async_map_elem {
	struct bpf_spin_lock lock;	/* protects @state transitions */
	enum hid_bpf_async_state state;
	struct bpf_timer t;	/* delays the wq start for timed calls */
	struct bpf_wq wq;	/* runs the user callback */
	u32 hid;	/* HID device id; the wq cb re-allocates a ctx from it */
};
31 
/* Pool of HID_BPF_ASYNC_MAX_CTX async context slots, indexed by u32 key. */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, HID_BPF_ASYNC_MAX_CTX);
	__type(key, u32);
	__type(value, struct hid_bpf_async_map_elem);
} hid_bpf_async_ctx_map SEC(".maps");
38 
/**
 * HID_BPF_ASYNC_CB: macro to define an async callback used in a bpf_wq
 *
 * Expands into a bpf_wq callback of hid_bpf_async_callback_t type which:
 * - allocates a HID-BPF context for the device id stored in the map element,
 * - marks the element RUNNING and invokes the user-provided body with that
 *   context,
 * - marks the element INITIALIZED again and releases the context.
 *
 * The user body follows the macro invocation as the body of
 * ____##cb(struct hid_bpf_ctx *ctx).
 *
 * The caller is responsible for allocating a key in the async map
 * with hid_bpf_async_get_ctx().
 */
#define HID_BPF_ASYNC_CB(cb)					\
cb(void *map, int *key, void *value);				\
static __always_inline int					\
____##cb(struct hid_bpf_ctx *ctx);				\
typeof(cb(0, 0, 0)) cb(void *map, int *key, void *value)	\
{								\
	struct hid_bpf_async_map_elem *e;				\
	struct hid_bpf_ctx *ctx;				\
								\
	e = (struct hid_bpf_async_map_elem *)value;			\
	ctx = hid_bpf_allocate_context(e->hid);			\
	if (!ctx)						\
		return 0; /* EPERM check */			\
								\
	e->state = HID_BPF_ASYNC_STATE_RUNNING;			\
								\
	____##cb(ctx);						\
								\
	e->state = HID_BPF_ASYNC_STATE_INITIALIZED;		\
	hid_bpf_release_context(ctx);				\
	return 0;						\
}								\
static __always_inline int					\
____##cb
69 
/**
 * HID_BPF_ASYNC_FUN: macro to automatically handle async callbacks contexts
 *
 * For a function fun(struct hid_bpf_ctx *ctx), declares:
 * - ____key__##fun: the async map slot reserved for this function,
 * - ____async_init_##fun(): reserves that slot through hid_bpf_async_get_ctx(),
 * - ____##fun##_cb: the bpf_wq callback (via HID_BPF_ASYNC_CB) forwarding
 *   to fun().
 *
 * Needs to be used in conjunction with HID_BPF_ASYNC_INIT and HID_BPF_ASYNC_DELAYED_CALL
 */
#define HID_BPF_ASYNC_FUN(fun)						\
fun(struct hid_bpf_ctx *ctx);					\
int ____key__##fun;						\
static int ____async_init_##fun(void)				\
{								\
	____key__##fun = hid_bpf_async_get_ctx();			\
	if (____key__##fun < 0)					\
		return ____key__##fun;				\
	return 0;						\
}								\
static int HID_BPF_ASYNC_CB(____##fun##_cb)(struct hid_bpf_ctx *hctx)	\
{								\
	return fun(hctx);					\
}								\
typeof(fun(0)) fun

/* Reserve the async context slot for fun; call once before any delayed call. */
#define HID_BPF_ASYNC_INIT(fun)	____async_init_##fun()
/* Schedule fun to run asynchronously after @delay milliseconds. */
#define HID_BPF_ASYNC_DELAYED_CALL(fun, ctx, delay)		\
	hid_bpf_async_delayed_call(ctx, delay, ____key__##fun, ____##fun##_cb)
94 
95 /*
96  * internal cb for starting the delayed work callback in a workqueue.
97  */
98 static int __start_wq_timer_cb(void *map, int *key, void *value)
99 {
100 	struct hid_bpf_async_map_elem *e = (struct hid_bpf_async_map_elem *)value;
101 
102 	bpf_wq_start(&e->wq, 0);
103 
104 	return 0;
105 }
106 
/*
 * Scan the async context map for a free slot.
 *
 * Claims the first element found in HID_BPF_ASYNC_STATE_UNSET by flipping
 * it to HID_BPF_ASYNC_STATE_INITIALIZING under the element's spin lock, so
 * concurrent callers cannot grab the same slot.
 *
 * Returns the claimed index, -EINVAL when every slot is in use, or -ENOMEM
 * if the array lookup fails (cannot happen for a valid index).
 */
static int hid_bpf_async_find_empty_key(void)
{
	int i;

	bpf_for(i, 0, HID_BPF_ASYNC_MAX_CTX) {
		struct hid_bpf_async_map_elem *elem;
		int key = i;

		elem = bpf_map_lookup_elem(&hid_bpf_async_ctx_map, &key);
		if (!elem)
			return -ENOMEM; /* should never happen */

		{
			guard(bpf_spin)(&elem->lock);

			if (elem->state == HID_BPF_ASYNC_STATE_UNSET) {
				elem->state = HID_BPF_ASYNC_STATE_INITIALIZING;
				return i;
			}
		}
	}

	return -EINVAL;
}
131 
132 static int hid_bpf_async_get_ctx(void)
133 {
134 	int key = hid_bpf_async_find_empty_key();
135 	struct hid_bpf_async_map_elem *elem;
136 	int err;
137 
138 	if (key < 0)
139 		return key;
140 
141 	elem = bpf_map_lookup_elem(&hid_bpf_async_ctx_map, &key);
142 	if (!elem)
143 		return -EINVAL;
144 
145 	err = bpf_timer_init(&elem->t, &hid_bpf_async_ctx_map, CLOCK_MONOTONIC);
146 	if (err)
147 		return err;
148 
149 	err = bpf_timer_set_callback(&elem->t, __start_wq_timer_cb);
150 	if (err)
151 		return err;
152 
153 	err = bpf_wq_init(&elem->wq, &hid_bpf_async_ctx_map, 0);
154 	if (err)
155 		return err;
156 
157 	elem->state = HID_BPF_ASYNC_STATE_INITIALIZED;
158 
159 	return key;
160 }
161 
162 static inline u64 ms_to_ns(u64 milliseconds)
163 {
164 	return (u64)milliseconds * 1000UL * 1000UL;
165 }
166 
/*
 * Schedule @wq_cb to run asynchronously on the async slot @key.
 *
 * With a non-zero @milliseconds the slot's bpf_timer is armed and its
 * callback (__start_wq_timer_cb) kicks the workqueue once the delay has
 * elapsed; with zero the workqueue is started immediately.
 *
 * Returns 0 on success or a negative error code.
 */
static int hid_bpf_async_delayed_call(struct hid_bpf_ctx *hctx, u64 milliseconds, int key,
			      hid_bpf_async_callback_t wq_cb)
{
	struct hid_bpf_async_map_elem *elem;
	int err;

	elem = bpf_map_lookup_elem(&hid_bpf_async_ctx_map, &key);
	if (!elem)
		return -EINVAL;

	{
		guard(bpf_spin)(&elem->lock);

		/* The wq must be:
		 * - HID_BPF_ASYNC_STATE_INITIALIZED -> it's been initialized and ready to be called
		 * - HID_BPF_ASYNC_STATE_RUNNING -> possible re-entry from the wq itself
		 */
		if (elem->state != HID_BPF_ASYNC_STATE_INITIALIZED &&
		    elem->state != HID_BPF_ASYNC_STATE_RUNNING)
			return -EINVAL;

		elem->state = HID_BPF_ASYNC_STATE_STARTING;
	}

	/* remember the device id: the wq callback re-allocates a ctx from it */
	elem->hid = hctx->hid->id;

	/* NOTE(review): on the error returns below the slot is left in
	 * HID_BPF_ASYNC_STATE_STARTING, so further calls on this key fail
	 * with -EINVAL — confirm these errors are meant to be fatal.
	 */
	err = bpf_wq_set_callback(&elem->wq, wq_cb, 0);
	if (err)
		return err;

	if (milliseconds) {
		/* needed for every call because a cancel might unset this */
		err = bpf_timer_set_callback(&elem->t, __start_wq_timer_cb);
		if (err)
			return err;

		err = bpf_timer_start(&elem->t, ms_to_ns(milliseconds), 0);
		if (err)
			return err;

		return 0;
	}

	return bpf_wq_start(&elem->wq, 0);
}
212 
213 static inline int hid_bpf_async_call(struct hid_bpf_ctx *ctx, int key,
214 				     hid_bpf_async_callback_t wq_cb)
215 {
216 	return hid_bpf_async_delayed_call(ctx, 0, key, wq_cb);
217 }
218 
219 #endif /* __HID_BPF_ASYNC_H__ */
220