// SPDX-License-Identifier: GPL-2.0-only

/*
 *  HID-BPF support for Linux
 *
 *  Copyright (c) 2024 Benjamin Tissoires
 */

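/*
 * This file wires HID-BPF into the BPF struct_ops machinery: it
 * registers the "hid_bpf_ops" struct_ops type, validates the BPF
 * programs attached to its members, and adds/removes each attached
 * struct_ops map to/from the per-device list of HID-BPF programs.
 */
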
#include <linux/bitops.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/hid.h>
#include <linux/hid_bpf.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>
#include "hid_bpf_dispatch.h"

static struct btf *hid_bpf_ops_btf;

static int hid_bpf_ops_init(struct btf *btf)
{
	hid_bpf_ops_btf = btf;
	return 0;
}

static bool hid_bpf_ops_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}

static int hid_bpf_ops_check_member(const struct btf_type *t,
				    const struct btf_member *member,
				    const struct bpf_prog *prog)
{
	u32 moff = __btf_member_bit_offset(t, member) / 8;

	/* only hid_rdesc_fixup() and hid_hw_request() may be sleepable */
	switch (moff) {
	case offsetof(struct hid_bpf_ops, hid_rdesc_fixup):
	case offsetof(struct hid_bpf_ops, hid_hw_request):
		break;
	default:
		if (prog->sleepable)
			return -EINVAL;
	}

	return 0;
}
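
/*
 * Illustrative sketch (not part of this file): in a BPF object, the
 * sleepable restriction above maps onto libbpf program section names.
 * A hid_rdesc_fixup hook may be declared sleepable, a hid_device_event
 * hook may not.  "my_fixup" and "my_event" are made-up names:
 *
 *	SEC("struct_ops.s/hid_rdesc_fixup")	// sleepable: allowed
 *	int BPF_PROG(my_fixup, struct hid_bpf_ctx *hctx)
 *	{
 *		...
 *	}
 *
 *	SEC("struct_ops/hid_device_event")	// must not be sleepable
 *	int BPF_PROG(my_event, struct hid_bpf_ctx *hctx,
 *		     enum hid_report_type type, __u64 source)
 *	{
 *		...
 *	}
 */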

struct hid_bpf_offset_write_range {
	const char *struct_name;
	u32 struct_length;
	u32 start;
	u32 end;
};

static int hid_bpf_ops_btf_struct_access(struct bpf_verifier_log *log,
					 const struct bpf_reg_state *reg,
					 int off, int size)
{
#define WRITE_RANGE(_name, _field, _is_string)					\
	{									\
		.struct_name = #_name,						\
		.struct_length = sizeof(struct _name),				\
		.start = offsetof(struct _name, _field),			\
		.end = offsetofend(struct _name, _field) - !!(_is_string),	\
	}

	/* the only fields a HID-BPF program may write to; for strings the
	 * trailing NUL byte is excluded from the range so it can not be
	 * overwritten
	 */
	const struct hid_bpf_offset_write_range write_ranges[] = {
		WRITE_RANGE(hid_bpf_ctx, retval, false),
		WRITE_RANGE(hid_device, name, true),
		WRITE_RANGE(hid_device, uniq, true),
		WRITE_RANGE(hid_device, phys, true),
	};
#undef WRITE_RANGE
	const struct btf_type *state = NULL;
	const struct btf_type *t;
	const char *cur = NULL;
	int i;

	t = btf_type_by_id(reg->btf, reg->btf_id);

	for (i = 0; i < ARRAY_SIZE(write_ranges); i++) {
		const struct hid_bpf_offset_write_range *write_range = &write_ranges[i];
		s32 type_id;

		/* we already matched a writable struct, but the table moved
		 * on to a different struct name: no further range can match,
		 * stop the loop.
		 */
		if (t == state && write_range->struct_name != cur)
			break;

		/* new struct to look for */
		if (write_range->struct_name != cur) {
			type_id = btf_find_by_name_kind(reg->btf, write_range->struct_name,
							BTF_KIND_STRUCT);
			if (type_id < 0)
				return -EINVAL;

			state = btf_type_by_id(reg->btf, type_id);
		}

		/* this is not the struct we are looking for */
		if (t != state) {
			cur = write_range->struct_name;
			continue;
		}

		/* first time we see this struct, check for out of bounds */
		if (cur != write_range->struct_name &&
		    off + size > write_range->struct_length) {
			bpf_log(log, "write access for struct %s at off %d with size %d\n",
				write_range->struct_name, off, size);
			return -EACCES;
		}

		/* now check if we are within the allowed boundaries */
		if (off >= write_range->start && off + size <= write_range->end)
			return NOT_INIT;

		cur = write_range->struct_name;
	}

	if (t != state)
		bpf_log(log, "write access to this struct is not supported\n");
	else
		bpf_log(log,
			"write access at off %d with size %d on read-only part of %s\n",
			off, size, cur);

	return -EACCES;
}
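
/*
 * Sketch of what the access control above permits from a program's
 * point of view (illustrative only; "my_event" is a made-up name):
 *
 *	SEC("struct_ops/hid_device_event")
 *	int BPF_PROG(my_event, struct hid_bpf_ctx *hctx,
 *		     enum hid_report_type type, __u64 source)
 *	{
 *		hctx->retval = 0;		// allowed: hid_bpf_ctx.retval
 *		// hctx->allocated_size = 0;	// rejected: read-only part
 *		return 0;
 *	}
 */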

static const struct bpf_verifier_ops hid_bpf_verifier_ops = {
	.get_func_proto = bpf_base_func_proto,
	.is_valid_access = hid_bpf_ops_is_valid_access,
	.btf_struct_access = hid_bpf_ops_btf_struct_access,
};

static int hid_bpf_ops_init_member(const struct btf_type *t,
				   const struct btf_member *member,
				   void *kdata, const void *udata)
{
	const struct hid_bpf_ops *uhid_bpf_ops;
	struct hid_bpf_ops *khid_bpf_ops;
	u32 moff;

	uhid_bpf_ops = (const struct hid_bpf_ops *)udata;
	khid_bpf_ops = (struct hid_bpf_ops *)kdata;

	moff = __btf_member_bit_offset(t, member) / 8;

	switch (moff) {
	case offsetof(struct hid_bpf_ops, hid_id):
		/* For the hid_id and flags fields, this function has to copy
		 * the value and return 1 to signal that the data has been
		 * handled by the struct_ops type, or the verifier will reject
		 * the map if the value of those fields is not zero.
		 */
		khid_bpf_ops->hid_id = uhid_bpf_ops->hid_id;
		return 1;
	case offsetof(struct hid_bpf_ops, flags):
		if (uhid_bpf_ops->flags & ~BPF_F_BEFORE)
			return -EINVAL;
		khid_bpf_ops->flags = uhid_bpf_ops->flags;
		return 1;
	}
	return 0;
}
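
/*
 * Illustrative userspace counterpart (not part of this file): hid_id
 * and flags are plain data members of the struct_ops map, so they can
 * be filled in through the skeleton before the object is loaded.
 * "skel", "my_ops" and "my_skel__load" are made-up names:
 *
 *	skel->struct_ops.my_ops->hid_id = hid_device_id;
 *	skel->struct_ops.my_ops->flags = BPF_F_BEFORE;	// optional
 *	err = my_skel__load(skel);
 */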

static int hid_bpf_reg(void *kdata)
{
	struct hid_bpf_ops *ops = kdata;
	struct hid_device *hdev;
	int count, err = 0;

	hdev = hid_get_device(ops->hid_id);
	if (IS_ERR(hdev))
		return PTR_ERR(hdev);

	ops->hdev = hdev;

	mutex_lock(&hdev->bpf.prog_list_lock);

	count = list_count_nodes(&hdev->bpf.prog_list);
	if (count >= HID_BPF_MAX_PROGS_PER_DEV) {
		err = -E2BIG;
		goto out_unlock;
	}

	if (ops->hid_rdesc_fixup) {
		/* only one hid_rdesc_fixup() is allowed per device */
		if (hdev->bpf.rdesc_ops) {
			err = -EINVAL;
			goto out_unlock;
		}

		hdev->bpf.rdesc_ops = ops;
	}

	if (ops->hid_device_event) {
		err = hid_bpf_allocate_event_data(hdev);
		if (err)
			goto out_unlock;
	}

	if (ops->flags & BPF_F_BEFORE)
		list_add_rcu(&ops->list, &hdev->bpf.prog_list);
	else
		list_add_tail_rcu(&ops->list, &hdev->bpf.prog_list);
	synchronize_srcu(&hdev->bpf.srcu);

out_unlock:
	mutex_unlock(&hdev->bpf.prog_list_lock);

	if (err) {
		if (hdev->bpf.rdesc_ops == ops)
			hdev->bpf.rdesc_ops = NULL;
		hid_put_device(hdev);
	} else if (ops->hid_rdesc_fixup) {
		hid_bpf_reconnect(hdev);
	}

	return err;
}
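
/*
 * Illustrative attach sequence from userspace (not part of this file):
 * hid_bpf_reg() above runs when the struct_ops map is attached.  The
 * map name "my_ops" is made up for the example:
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_map__attach_struct_ops(skel->maps.my_ops);
 *	if (!link)
 *		// attach failed: bad hid_id, too many programs on the
 *		// device, or a second hid_rdesc_fixup() on the same device
 *		...
 */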

static void hid_bpf_unreg(void *kdata)
{
	struct hid_bpf_ops *ops = kdata;
	struct hid_device *hdev;
	bool reconnect = false;

	hdev = ops->hdev;

	/* check if __hid_bpf_ops_destroy_device() has been called */
	if (!hdev)
		return;

	mutex_lock(&hdev->bpf.prog_list_lock);

	list_del_rcu(&ops->list);
	synchronize_srcu(&hdev->bpf.srcu);

	reconnect = hdev->bpf.rdesc_ops == ops;
	if (reconnect)
		hdev->bpf.rdesc_ops = NULL;

	mutex_unlock(&hdev->bpf.prog_list_lock);

	if (reconnect)
		hid_bpf_reconnect(hdev);

	hid_put_device(hdev);
}

static int __hid_bpf_device_event(struct hid_bpf_ctx *ctx, enum hid_report_type type, __u64 source)
{
	return 0;
}

static int __hid_bpf_rdesc_fixup(struct hid_bpf_ctx *ctx)
{
	return 0;
}

/* default no-op implementations, used as CFI stubs for indirect calls */
static struct hid_bpf_ops __bpf_hid_bpf_ops = {
	.hid_device_event = __hid_bpf_device_event,
	.hid_rdesc_fixup = __hid_bpf_rdesc_fixup,
};

static struct bpf_struct_ops bpf_hid_bpf_ops = {
	.verifier_ops = &hid_bpf_verifier_ops,
	.init = hid_bpf_ops_init,
	.check_member = hid_bpf_ops_check_member,
	.init_member = hid_bpf_ops_init_member,
	.reg = hid_bpf_reg,
	.unreg = hid_bpf_unreg,
	.name = "hid_bpf_ops",
	.cfi_stubs = &__bpf_hid_bpf_ops,
	.owner = THIS_MODULE,
};
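
/*
 * Illustrative BPF-side declaration (not part of this file): a program
 * object instantiates the struct_ops registered above with a map in
 * the ".struct_ops.link" section.  "my_ops" and "my_event" are made-up
 * names:
 *
 *	SEC(".struct_ops.link")
 *	struct hid_bpf_ops my_ops = {
 *		.hid_device_event = (void *)my_event,
 *	};
 */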

void __hid_bpf_ops_destroy_device(struct hid_device *hdev)
{
	struct hid_bpf_ops *e;

	/* the device is going away: drop the reference each attached ops
	 * holds on it and clear e->hdev so that a later hid_bpf_unreg()
	 * becomes a no-op
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(e, &hdev->bpf.prog_list, list) {
		hid_put_device(hdev);
		e->hdev = NULL;
	}
	rcu_read_unlock();
}

static int __init hid_bpf_struct_ops_init(void)
{
	return register_bpf_struct_ops(&bpf_hid_bpf_ops, hid_bpf_ops);
}
late_initcall(hid_bpf_struct_ops_init);