xref: /linux/drivers/hid/bpf/hid_bpf_dispatch.c (revision e8445737c0264cf4ddac682c278e0ef5b8a61a3d)
1 // SPDX-License-Identifier: GPL-2.0-only
2 
3 /*
4  *  HID-BPF support for Linux
5  *
6  *  Copyright (c) 2022 Benjamin Tissoires
7  */
8 
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10 #include <linux/bitops.h>
11 #include <linux/btf.h>
12 #include <linux/btf_ids.h>
13 #include <linux/filter.h>
14 #include <linux/hid.h>
15 #include <linux/hid_bpf.h>
16 #include <linux/init.h>
17 #include <linux/kfifo.h>
18 #include <linux/minmax.h>
19 #include <linux/module.h>
20 #include <linux/workqueue.h>
21 #include "hid_bpf_dispatch.h"
22 #include "entrypoints/entrypoints.lskel.h"
23 
/*
 * Bridge into the HID core. NULL until assigned elsewhere (not in this
 * file); every entry point below bails out with an error while it is NULL.
 * NOTE(review): presumably set by hid-core when the HID bus registers —
 * confirm against the hid_bpf_ops assignment site.
 */
struct hid_bpf_ops *hid_bpf_ops;
EXPORT_SYMBOL(hid_bpf_ops);
26 
/**
 * hid_bpf_device_event - Called whenever an event is coming in from the device
 *
 * @ctx: The HID-BPF context
 *
 * @return %0 on success and keep processing; a positive value to change the
 * incoming size buffer; a negative error code to interrupt the processing
 * of this event
 *
 * Declare an %fmod_ret tracing bpf program to this function and attach this
 * program through hid_bpf_attach_prog() to have this helper called for
 * any incoming event from the device itself.
 *
 * The function is called while on IRQ context, so we can not sleep.
 */
/* never used by the kernel but declared so we can load and attach a tracepoint */
__weak noinline int hid_bpf_device_event(struct hid_bpf_ctx *ctx)
{
	/* default: report no change so the event is processed untouched */
	return 0;
}
/* required so fmod_ret BPF programs may attach and override the return value */
ALLOW_ERROR_INJECTION(hid_bpf_device_event, ERRNO);
48 
49 u8 *
50 dispatch_hid_bpf_device_event(struct hid_device *hdev, enum hid_report_type type, u8 *data,
51 			      u32 *size, int interrupt)
52 {
53 	struct hid_bpf_ctx_kern ctx_kern = {
54 		.ctx = {
55 			.hid = hdev,
56 			.report_type = type,
57 			.allocated_size = hdev->bpf.allocated_data,
58 			.size = *size,
59 		},
60 		.data = hdev->bpf.device_data,
61 	};
62 	int ret;
63 
64 	if (type >= HID_REPORT_TYPES)
65 		return ERR_PTR(-EINVAL);
66 
67 	/* no program has been attached yet */
68 	if (!hdev->bpf.device_data)
69 		return data;
70 
71 	memset(ctx_kern.data, 0, hdev->bpf.allocated_data);
72 	memcpy(ctx_kern.data, data, *size);
73 
74 	ret = hid_bpf_prog_run(hdev, HID_BPF_PROG_TYPE_DEVICE_EVENT, &ctx_kern);
75 	if (ret < 0)
76 		return ERR_PTR(ret);
77 
78 	if (ret) {
79 		if (ret > ctx_kern.ctx.allocated_size)
80 			return ERR_PTR(-EINVAL);
81 
82 		*size = ret;
83 	}
84 
85 	return ctx_kern.data;
86 }
87 EXPORT_SYMBOL_GPL(dispatch_hid_bpf_device_event);
88 
/**
 * hid_bpf_rdesc_fixup - Called when the probe function parses the report
 * descriptor of the HID device
 *
 * @ctx: The HID-BPF context
 *
 * @return 0 on success and keep processing; a positive value to change the
 * incoming size buffer; a negative error code to interrupt the processing
 * of this event
 *
 * Declare an %fmod_ret tracing bpf program to this function and attach this
 * program through hid_bpf_attach_prog() to have this helper called before any
 * parsing of the report descriptor by HID.
 */
/* never used by the kernel but declared so we can load and attach a tracepoint */
__weak noinline int hid_bpf_rdesc_fixup(struct hid_bpf_ctx *ctx)
{
	/* default: report no change so the descriptor is used as-is */
	return 0;
}
/* required so fmod_ret BPF programs may attach and override the return value */
ALLOW_ERROR_INJECTION(hid_bpf_rdesc_fixup, ERRNO);
109 
110 u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *size)
111 {
112 	int ret;
113 	struct hid_bpf_ctx_kern ctx_kern = {
114 		.ctx = {
115 			.hid = hdev,
116 			.size = *size,
117 			.allocated_size = HID_MAX_DESCRIPTOR_SIZE,
118 		},
119 	};
120 
121 	ctx_kern.data = kzalloc(ctx_kern.ctx.allocated_size, GFP_KERNEL);
122 	if (!ctx_kern.data)
123 		goto ignore_bpf;
124 
125 	memcpy(ctx_kern.data, rdesc, min_t(unsigned int, *size, HID_MAX_DESCRIPTOR_SIZE));
126 
127 	ret = hid_bpf_prog_run(hdev, HID_BPF_PROG_TYPE_RDESC_FIXUP, &ctx_kern);
128 	if (ret < 0)
129 		goto ignore_bpf;
130 
131 	if (ret) {
132 		if (ret > ctx_kern.ctx.allocated_size)
133 			goto ignore_bpf;
134 
135 		*size = ret;
136 	}
137 
138 	rdesc = krealloc(ctx_kern.data, *size, GFP_KERNEL);
139 
140 	return rdesc;
141 
142  ignore_bpf:
143 	kfree(ctx_kern.data);
144 	return kmemdup(rdesc, *size, GFP_KERNEL);
145 }
146 EXPORT_SYMBOL_GPL(call_hid_bpf_rdesc_fixup);
147 
148 /**
149  * hid_bpf_get_data - Get the kernel memory pointer associated with the context @ctx
150  *
151  * @ctx: The HID-BPF context
152  * @offset: The offset within the memory
153  * @rdwr_buf_size: the const size of the buffer
154  *
155  * @returns %NULL on error, an %__u8 memory pointer on success
156  */
157 noinline __u8 *
158 hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t rdwr_buf_size)
159 {
160 	struct hid_bpf_ctx_kern *ctx_kern;
161 
162 	if (!ctx)
163 		return NULL;
164 
165 	ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
166 
167 	if (rdwr_buf_size + offset > ctx->allocated_size)
168 		return NULL;
169 
170 	return ctx_kern->data + offset;
171 }
172 
/*
 * The following set contains all functions we agree BPF programs
 * can use.
 */
BTF_SET8_START(hid_bpf_kfunc_ids)
BTF_ID_FLAGS(func, call_hid_bpf_prog_put_deferred)
BTF_ID_FLAGS(func, hid_bpf_get_data, KF_RET_NULL)
BTF_SET8_END(hid_bpf_kfunc_ids)

/* kfunc set registered in hid_bpf_init() for BPF_PROG_TYPE_TRACING programs */
static const struct btf_kfunc_id_set hid_bpf_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &hid_bpf_kfunc_ids,
};
186 
187 static int device_match_id(struct device *dev, const void *id)
188 {
189 	struct hid_device *hdev = to_hid_device(dev);
190 
191 	return hdev->id == *(int *)id;
192 }
193 
194 static int __hid_bpf_allocate_data(struct hid_device *hdev, u8 **data, u32 *size)
195 {
196 	u8 *alloc_data;
197 	unsigned int i, j, max_report_len = 0;
198 	size_t alloc_size = 0;
199 
200 	/* compute the maximum report length for this device */
201 	for (i = 0; i < HID_REPORT_TYPES; i++) {
202 		struct hid_report_enum *report_enum = hdev->report_enum + i;
203 
204 		for (j = 0; j < HID_MAX_IDS; j++) {
205 			struct hid_report *report = report_enum->report_id_hash[j];
206 
207 			if (report)
208 				max_report_len = max(max_report_len, hid_report_len(report));
209 		}
210 	}
211 
212 	/*
213 	 * Give us a little bit of extra space and some predictability in the
214 	 * buffer length we create. This way, we can tell users that they can
215 	 * work on chunks of 64 bytes of memory without having the bpf verifier
216 	 * scream at them.
217 	 */
218 	alloc_size = DIV_ROUND_UP(max_report_len, 64) * 64;
219 
220 	alloc_data = kzalloc(alloc_size, GFP_KERNEL);
221 	if (!alloc_data)
222 		return -ENOMEM;
223 
224 	*data = alloc_data;
225 	*size = alloc_size;
226 
227 	return 0;
228 }
229 
230 static int hid_bpf_allocate_event_data(struct hid_device *hdev)
231 {
232 	/* hdev->bpf.device_data is already allocated, abort */
233 	if (hdev->bpf.device_data)
234 		return 0;
235 
236 	return __hid_bpf_allocate_data(hdev, &hdev->bpf.device_data, &hdev->bpf.allocated_data);
237 }
238 
239 int hid_bpf_reconnect(struct hid_device *hdev)
240 {
241 	if (!test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
242 		return device_reprobe(&hdev->dev);
243 
244 	return 0;
245 }
246 
247 /**
248  * hid_bpf_attach_prog - Attach the given @prog_fd to the given HID device
249  *
250  * @hid_id: the system unique identifier of the HID device
251  * @prog_fd: an fd in the user process representing the program to attach
252  * @flags: any logical OR combination of &enum hid_bpf_attach_flags
253  *
254  * @returns %0 on success, an error code otherwise.
255  */
256 /* called from syscall */
257 noinline int
258 hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, __u32 flags)
259 {
260 	struct hid_device *hdev;
261 	struct device *dev;
262 	int err, prog_type = hid_bpf_get_prog_attach_type(prog_fd);
263 
264 	if (!hid_bpf_ops)
265 		return -EINVAL;
266 
267 	if (prog_type < 0)
268 		return prog_type;
269 
270 	if (prog_type >= HID_BPF_PROG_TYPE_MAX)
271 		return -EINVAL;
272 
273 	if ((flags & ~HID_BPF_FLAG_MASK))
274 		return -EINVAL;
275 
276 	dev = bus_find_device(hid_bpf_ops->bus_type, NULL, &hid_id, device_match_id);
277 	if (!dev)
278 		return -EINVAL;
279 
280 	hdev = to_hid_device(dev);
281 
282 	if (prog_type == HID_BPF_PROG_TYPE_DEVICE_EVENT) {
283 		err = hid_bpf_allocate_event_data(hdev);
284 		if (err)
285 			return err;
286 	}
287 
288 	err = __hid_bpf_attach_prog(hdev, prog_type, prog_fd, flags);
289 	if (err)
290 		return err;
291 
292 	if (prog_type == HID_BPF_PROG_TYPE_RDESC_FIXUP) {
293 		err = hid_bpf_reconnect(hdev);
294 		if (err)
295 			return err;
296 	}
297 
298 	return 0;
299 }
300 
301 /**
302  * hid_bpf_allocate_context - Allocate a context to the given HID device
303  *
304  * @hid_id: the system unique identifier of the HID device
305  *
306  * @returns A pointer to &struct hid_bpf_ctx on success, %NULL on error.
307  */
308 noinline struct hid_bpf_ctx *
309 hid_bpf_allocate_context(unsigned int hid_id)
310 {
311 	struct hid_device *hdev;
312 	struct hid_bpf_ctx_kern *ctx_kern = NULL;
313 	struct device *dev;
314 
315 	if (!hid_bpf_ops)
316 		return NULL;
317 
318 	dev = bus_find_device(hid_bpf_ops->bus_type, NULL, &hid_id, device_match_id);
319 	if (!dev)
320 		return NULL;
321 
322 	hdev = to_hid_device(dev);
323 
324 	ctx_kern = kzalloc(sizeof(*ctx_kern), GFP_KERNEL);
325 	if (!ctx_kern)
326 		return NULL;
327 
328 	ctx_kern->ctx.hid = hdev;
329 
330 	return &ctx_kern->ctx;
331 }
332 
333 /**
334  * hid_bpf_release_context - Release the previously allocated context @ctx
335  *
336  * @ctx: the HID-BPF context to release
337  *
338  */
339 noinline void
340 hid_bpf_release_context(struct hid_bpf_ctx *ctx)
341 {
342 	struct hid_bpf_ctx_kern *ctx_kern;
343 
344 	if (!ctx)
345 		return;
346 
347 	ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
348 
349 	kfree(ctx_kern);
350 }
351 
352 /**
353  * hid_bpf_hw_request - Communicate with a HID device
354  *
355  * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context()
356  * @buf: a %PTR_TO_MEM buffer
357  * @buf__sz: the size of the data to transfer
358  * @rtype: the type of the report (%HID_INPUT_REPORT, %HID_FEATURE_REPORT, %HID_OUTPUT_REPORT)
359  * @reqtype: the type of the request (%HID_REQ_GET_REPORT, %HID_REQ_SET_REPORT, ...)
360  *
361  * @returns %0 on success, a negative error code otherwise.
362  */
363 noinline int
364 hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
365 		   enum hid_report_type rtype, enum hid_class_request reqtype)
366 {
367 	struct hid_device *hdev;
368 	struct hid_report *report;
369 	struct hid_report_enum *report_enum;
370 	u8 *dma_data;
371 	u32 report_len;
372 	int ret;
373 
374 	/* check arguments */
375 	if (!ctx || !hid_bpf_ops || !buf)
376 		return -EINVAL;
377 
378 	switch (rtype) {
379 	case HID_INPUT_REPORT:
380 	case HID_OUTPUT_REPORT:
381 	case HID_FEATURE_REPORT:
382 		break;
383 	default:
384 		return -EINVAL;
385 	}
386 
387 	switch (reqtype) {
388 	case HID_REQ_GET_REPORT:
389 	case HID_REQ_GET_IDLE:
390 	case HID_REQ_GET_PROTOCOL:
391 	case HID_REQ_SET_REPORT:
392 	case HID_REQ_SET_IDLE:
393 	case HID_REQ_SET_PROTOCOL:
394 		break;
395 	default:
396 		return -EINVAL;
397 	}
398 
399 	if (buf__sz < 1)
400 		return -EINVAL;
401 
402 	hdev = (struct hid_device *)ctx->hid; /* discard const */
403 
404 	report_enum = hdev->report_enum + rtype;
405 	report = hid_bpf_ops->hid_get_report(report_enum, buf);
406 	if (!report)
407 		return -EINVAL;
408 
409 	report_len = hid_report_len(report);
410 
411 	if (buf__sz > report_len)
412 		buf__sz = report_len;
413 
414 	dma_data = kmemdup(buf, buf__sz, GFP_KERNEL);
415 	if (!dma_data)
416 		return -ENOMEM;
417 
418 	ret = hid_bpf_ops->hid_hw_raw_request(hdev,
419 					      dma_data[0],
420 					      dma_data,
421 					      buf__sz,
422 					      rtype,
423 					      reqtype);
424 
425 	if (ret > 0)
426 		memcpy(buf, dma_data, ret);
427 
428 	kfree(dma_data);
429 	return ret;
430 }
431 
/* for syscall HID-BPF */
BTF_SET8_START(hid_bpf_syscall_kfunc_ids)
BTF_ID_FLAGS(func, hid_bpf_attach_prog)
BTF_ID_FLAGS(func, hid_bpf_allocate_context, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, hid_bpf_release_context, KF_RELEASE)
BTF_ID_FLAGS(func, hid_bpf_hw_request)
BTF_SET8_END(hid_bpf_syscall_kfunc_ids)

/* kfunc set registered in hid_bpf_init() for BPF_PROG_TYPE_SYSCALL programs */
static const struct btf_kfunc_id_set hid_bpf_syscall_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &hid_bpf_syscall_kfunc_ids,
};
444 
445 int hid_bpf_connect_device(struct hid_device *hdev)
446 {
447 	struct hid_bpf_prog_list *prog_list;
448 
449 	rcu_read_lock();
450 	prog_list = rcu_dereference(hdev->bpf.progs[HID_BPF_PROG_TYPE_DEVICE_EVENT]);
451 	rcu_read_unlock();
452 
453 	/* only allocate BPF data if there are programs attached */
454 	if (!prog_list)
455 		return 0;
456 
457 	return hid_bpf_allocate_event_data(hdev);
458 }
459 EXPORT_SYMBOL_GPL(hid_bpf_connect_device);
460 
461 void hid_bpf_disconnect_device(struct hid_device *hdev)
462 {
463 	kfree(hdev->bpf.device_data);
464 	hdev->bpf.device_data = NULL;
465 	hdev->bpf.allocated_data = 0;
466 }
467 EXPORT_SYMBOL_GPL(hid_bpf_disconnect_device);
468 
/* Called when a HID device is being destroyed: detach everything and make
 * sure no program gets re-attached to the dying device. */
void hid_bpf_destroy_device(struct hid_device *hdev)
{
	if (!hdev)
		return;

	/* mark the device as destroyed in bpf so we don't reattach it */
	hdev->bpf.destroyed = true;

	__hid_bpf_destroy_device(hdev);
}
EXPORT_SYMBOL_GPL(hid_bpf_destroy_device);
480 
/* Initialize the per-device HID-BPF state (currently only the progs lock).
 * NOTE(review): presumably called once per device at creation by hid-core —
 * confirm against the caller. */
void hid_bpf_device_init(struct hid_device *hdev)
{
	spin_lock_init(&hdev->bpf.progs_lock);
}
EXPORT_SYMBOL_GPL(hid_bpf_device_init);
486 
487 static int __init hid_bpf_init(void)
488 {
489 	int err;
490 
491 	/* Note: if we exit with an error any time here, we would entirely break HID, which
492 	 * is probably not something we want. So we log an error and return success.
493 	 *
494 	 * This is not a big deal: the syscall allowing to attach a BPF program to a HID device
495 	 * will not be available, so nobody will be able to use the functionality.
496 	 */
497 
498 	err = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &hid_bpf_kfunc_set);
499 	if (err) {
500 		pr_warn("error while setting HID BPF tracing kfuncs: %d", err);
501 		return 0;
502 	}
503 
504 	err = hid_bpf_preload_skel();
505 	if (err) {
506 		pr_warn("error while preloading HID BPF dispatcher: %d", err);
507 		return 0;
508 	}
509 
510 	/* register syscalls after we are sure we can load our preloaded bpf program */
511 	err = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &hid_bpf_syscall_kfunc_set);
512 	if (err) {
513 		pr_warn("error while setting HID BPF syscall kfuncs: %d", err);
514 		return 0;
515 	}
516 
517 	return 0;
518 }
519 
/* Module exit: release the preloaded skeleton and its links. */
static void __exit hid_bpf_exit(void)
{
	/* HID depends on us, so if we hit that code, we are guaranteed that hid
	 * has been removed and thus we do not need to clear the HID devices
	 */
	hid_bpf_free_links_and_skel();
}
527 
528 late_initcall(hid_bpf_init);
529 module_exit(hid_bpf_exit);
530 MODULE_AUTHOR("Benjamin Tissoires");
531 MODULE_LICENSE("GPL");
532