xref: /linux/drivers/hid/bpf/hid_bpf_dispatch.c (revision 3ac83fcd6e67c86d25040e6818972f2c36b51d23)
1 // SPDX-License-Identifier: GPL-2.0-only
2 
3 /*
4  *  HID-BPF support for Linux
5  *
6  *  Copyright (c) 2022-2024 Benjamin Tissoires
7  */
8 
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10 #include <linux/bitops.h>
11 #include <linux/btf.h>
12 #include <linux/btf_ids.h>
13 #include <linux/filter.h>
14 #include <linux/hid.h>
15 #include <linux/hid_bpf.h>
16 #include <linux/init.h>
17 #include <linux/kfifo.h>
18 #include <linux/minmax.h>
19 #include <linux/module.h>
20 #include "hid_bpf_dispatch.h"
21 
/*
 * Callbacks into HID core (hid_hw_raw_request, hid_input_report, ...),
 * installed by the hid-core module. NULL until hid-core registers itself,
 * so every user must check it before dereferencing.
 */
struct hid_ops *hid_ops;
EXPORT_SYMBOL(hid_ops);
24 
/*
 * dispatch_hid_bpf_device_event() - run every attached hid_device_event()
 * HID-BPF program on an incoming report.
 *
 * @hdev:      device the report originates from
 * @type:      report type, must be < HID_REPORT_TYPES
 * @data:      the raw incoming report
 * @size:      in: length of @data; out: length after BPF rewrites
 * @interrupt: IRQ-path flag (not used in this function)
 * @source:    opaque identifier of the emitter, forwarded to the programs
 *
 * Returns the BPF working buffer (hdev->bpf.device_data) on success,
 * @data untouched when no program is attached, or an ERR_PTR() on error.
 */
u8 *
dispatch_hid_bpf_device_event(struct hid_device *hdev, enum hid_report_type type, u8 *data,
			      u32 *size, int interrupt, u64 source)
{
	struct hid_bpf_ctx_kern ctx_kern = {
		.ctx = {
			.hid = hdev,
			.allocated_size = hdev->bpf.allocated_data,
			.size = *size,
		},
		.data = hdev->bpf.device_data,
	};
	struct hid_bpf_ops *e;
	int ret;

	if (type >= HID_REPORT_TYPES)
		return ERR_PTR(-EINVAL);

	/* no program has been attached yet */
	if (!hdev->bpf.device_data)
		return data;

	/* stage the report into the preallocated, zeroed working buffer */
	memset(ctx_kern.data, 0, hdev->bpf.allocated_data);
	memcpy(ctx_kern.data, data, *size);

	rcu_read_lock();
	list_for_each_entry_rcu(e, &hdev->bpf.prog_list, list) {
		if (e->hid_device_event) {
			ret = e->hid_device_event(&ctx_kern.ctx, type, source);
			if (ret < 0) {
				/* a negative return aborts the whole event */
				rcu_read_unlock();
				return ERR_PTR(ret);
			}

			/* a positive return is the new report size */
			if (ret)
				ctx_kern.ctx.size = ret;
		}
	}
	rcu_read_unlock();

	ret = ctx_kern.ctx.size;
	if (ret) {
		/* a program must not claim more than we allocated */
		if (ret > ctx_kern.ctx.allocated_size)
			return ERR_PTR(-EINVAL);

		*size = ret;
	}

	return ctx_kern.data;
}
EXPORT_SYMBOL_GPL(dispatch_hid_bpf_device_event);
76 
/*
 * dispatch_hid_bpf_raw_requests() - give attached programs a chance to
 * intercept a hid_hw_raw_request() call.
 *
 * @hdev:      target HID device
 * @reportnum: report ID of the request
 * @buf:       request/response buffer, exposed to programs through the context
 * @size:      size of @buf
 * @rtype:     report type, must be < HID_REPORT_TYPES
 * @reqtype:   HID class request (GET/SET_REPORT, ...)
 * @source:    opaque identifier of the caller
 * @from_bpf:  true when the request originates from a BPF program
 *             (recorded in the context to prevent re-entry)
 *
 * The first program returning non-zero short-circuits the remaining ones
 * and its value is returned. Returns 0 when no program intercepted.
 */
int dispatch_hid_bpf_raw_requests(struct hid_device *hdev,
				  unsigned char reportnum, u8 *buf,
				  u32 size, enum hid_report_type rtype,
				  enum hid_class_request reqtype,
				  u64 source, bool from_bpf)
{
	struct hid_bpf_ctx_kern ctx_kern = {
		.ctx = {
			.hid = hdev,
			.allocated_size = size,
			.size = size,
		},
		.data = buf,
		.from_bpf = from_bpf,
	};
	struct hid_bpf_ops *e;
	int ret, idx;

	if (rtype >= HID_REPORT_TYPES)
		return -EINVAL;

	/* sleepable context: the prog_list is protected by SRCU here */
	idx = srcu_read_lock(&hdev->bpf.srcu);
	list_for_each_entry_srcu(e, &hdev->bpf.prog_list, list,
				 srcu_read_lock_held(&hdev->bpf.srcu)) {
		if (!e->hid_hw_request)
			continue;

		ret = e->hid_hw_request(&ctx_kern.ctx, reportnum, rtype, reqtype, source);
		if (ret)
			goto out;
	}
	ret = 0;

out:
	srcu_read_unlock(&hdev->bpf.srcu, idx);
	return ret;
}
EXPORT_SYMBOL_GPL(dispatch_hid_bpf_raw_requests);
115 
/*
 * dispatch_hid_bpf_output_report() - give attached programs a chance to
 * intercept a hid_hw_output_report() call.
 *
 * @hdev:     target HID device
 * @buf:      output report data, exposed to programs through the context
 * @size:     size of @buf
 * @source:   opaque identifier of the caller
 * @from_bpf: true when the call originates from a BPF program
 *            (recorded in the context to prevent re-entry)
 *
 * The first program returning non-zero short-circuits the remaining ones
 * and its value is returned. Returns 0 when no program intercepted.
 */
int dispatch_hid_bpf_output_report(struct hid_device *hdev,
				   __u8 *buf, u32 size, __u64 source,
				   bool from_bpf)
{
	struct hid_bpf_ctx_kern ctx_kern = {
		.ctx = {
			.hid = hdev,
			.allocated_size = size,
			.size = size,
		},
		.data = buf,
		.from_bpf = from_bpf,
	};
	struct hid_bpf_ops *e;
	int ret, idx;

	/* sleepable context: the prog_list is protected by SRCU here */
	idx = srcu_read_lock(&hdev->bpf.srcu);
	list_for_each_entry_srcu(e, &hdev->bpf.prog_list, list,
				 srcu_read_lock_held(&hdev->bpf.srcu)) {
		if (!e->hid_hw_output_report)
			continue;

		ret = e->hid_hw_output_report(&ctx_kern.ctx, source);
		if (ret)
			goto out;
	}
	ret = 0;

out:
	srcu_read_unlock(&hdev->bpf.srcu, idx);
	return ret;
}
EXPORT_SYMBOL_GPL(dispatch_hid_bpf_output_report);
149 
150 u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *size)
151 {
152 	int ret;
153 	struct hid_bpf_ctx_kern ctx_kern = {
154 		.ctx = {
155 			.hid = hdev,
156 			.size = *size,
157 			.allocated_size = HID_MAX_DESCRIPTOR_SIZE,
158 		},
159 	};
160 
161 	if (!hdev->bpf.rdesc_ops)
162 		goto ignore_bpf;
163 
164 	ctx_kern.data = kzalloc(ctx_kern.ctx.allocated_size, GFP_KERNEL);
165 	if (!ctx_kern.data)
166 		goto ignore_bpf;
167 
168 	memcpy(ctx_kern.data, rdesc, min_t(unsigned int, *size, HID_MAX_DESCRIPTOR_SIZE));
169 
170 	ret = hdev->bpf.rdesc_ops->hid_rdesc_fixup(&ctx_kern.ctx);
171 	if (ret < 0)
172 		goto ignore_bpf;
173 
174 	if (ret) {
175 		if (ret > ctx_kern.ctx.allocated_size)
176 			goto ignore_bpf;
177 
178 		*size = ret;
179 	}
180 
181 	rdesc = krealloc(ctx_kern.data, *size, GFP_KERNEL);
182 
183 	return rdesc;
184 
185  ignore_bpf:
186 	kfree(ctx_kern.data);
187 	return kmemdup(rdesc, *size, GFP_KERNEL);
188 }
189 EXPORT_SYMBOL_GPL(call_hid_bpf_rdesc_fixup);
190 
191 static int device_match_id(struct device *dev, const void *id)
192 {
193 	struct hid_device *hdev = to_hid_device(dev);
194 
195 	return hdev->id == *(int *)id;
196 }
197 
/*
 * hid_get_device() - look up a HID device by its system-unique id.
 *
 * @hid_id: the id to search for on the HID bus
 *
 * On success the returned device holds a reference taken by
 * bus_find_device(); release it with hid_put_device(). Returns
 * ERR_PTR(-EINVAL) when hid-core is not loaded or no device matches.
 */
struct hid_device *hid_get_device(unsigned int hid_id)
{
	struct device *dev;

	if (!hid_ops)
		return ERR_PTR(-EINVAL);

	dev = bus_find_device(hid_ops->bus_type, NULL, &hid_id, device_match_id);
	if (!dev)
		return ERR_PTR(-EINVAL);

	return to_hid_device(dev);
}
211 
/* Drop the reference taken by hid_get_device(). */
void hid_put_device(struct hid_device *hid)
{
	put_device(&hid->dev);
}
216 
217 static int __hid_bpf_allocate_data(struct hid_device *hdev, u8 **data, u32 *size)
218 {
219 	u8 *alloc_data;
220 	unsigned int i, j, max_report_len = 0;
221 	size_t alloc_size = 0;
222 
223 	/* compute the maximum report length for this device */
224 	for (i = 0; i < HID_REPORT_TYPES; i++) {
225 		struct hid_report_enum *report_enum = hdev->report_enum + i;
226 
227 		for (j = 0; j < HID_MAX_IDS; j++) {
228 			struct hid_report *report = report_enum->report_id_hash[j];
229 
230 			if (report)
231 				max_report_len = max(max_report_len, hid_report_len(report));
232 		}
233 	}
234 
235 	/*
236 	 * Give us a little bit of extra space and some predictability in the
237 	 * buffer length we create. This way, we can tell users that they can
238 	 * work on chunks of 64 bytes of memory without having the bpf verifier
239 	 * scream at them.
240 	 */
241 	alloc_size = DIV_ROUND_UP(max_report_len, 64) * 64;
242 
243 	alloc_data = kzalloc(alloc_size, GFP_KERNEL);
244 	if (!alloc_data)
245 		return -ENOMEM;
246 
247 	*data = alloc_data;
248 	*size = alloc_size;
249 
250 	return 0;
251 }
252 
253 int hid_bpf_allocate_event_data(struct hid_device *hdev)
254 {
255 	/* hdev->bpf.device_data is already allocated, abort */
256 	if (hdev->bpf.device_data)
257 		return 0;
258 
259 	return __hid_bpf_allocate_data(hdev, &hdev->bpf.device_data, &hdev->bpf.allocated_data);
260 }
261 
/*
 * hid_bpf_reconnect() - schedule a reprobe of the device so a new report
 * descriptor fixup takes effect.
 *
 * test_and_set_bit() makes this idempotent: only the caller that flips
 * HID_STAT_REPROBED actually triggers device_reprobe().
 */
int hid_bpf_reconnect(struct hid_device *hdev)
{
	if (!test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
		return device_reprobe(&hdev->dev);

	return 0;
}
269 
270 /* Disables missing prototype warnings */
271 __bpf_kfunc_start_defs();
272 
/**
 * hid_bpf_get_data - Get the kernel memory pointer associated with the context @ctx
 *
 * @ctx: The HID-BPF context
 * @offset: The offset within the memory
 * @rdwr_buf_size: the const size of the buffer
 *
 * @returns %NULL on error, an %__u8 memory pointer on success
 */
__bpf_kfunc __u8 *
hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t rdwr_buf_size)
{
	struct hid_bpf_ctx_kern *ctx_kern;

	if (!ctx)
		return NULL;

	ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);

	/* refuse any window that would extend past the allocated buffer */
	if (rdwr_buf_size + offset > ctx->allocated_size)
		return NULL;

	return ctx_kern->data + offset;
}
297 
/**
 * hid_bpf_allocate_context - Allocate a context to the given HID device
 *
 * @hid_id: the system unique identifier of the HID device
 *
 * @returns A pointer to &struct hid_bpf_ctx on success, %NULL on error.
 */
__bpf_kfunc struct hid_bpf_ctx *
hid_bpf_allocate_context(unsigned int hid_id)
{
	struct hid_device *hdev;
	struct hid_bpf_ctx_kern *ctx_kern = NULL;

	/* takes a device reference, released in hid_bpf_release_context() */
	hdev = hid_get_device(hid_id);
	if (IS_ERR(hdev))
		return NULL;

	ctx_kern = kzalloc(sizeof(*ctx_kern), GFP_KERNEL);
	if (!ctx_kern) {
		hid_put_device(hdev);
		return NULL;
	}

	ctx_kern->ctx.hid = hdev;

	return &ctx_kern->ctx;
}
325 
/**
 * hid_bpf_release_context - Release the previously allocated context @ctx
 *
 * @ctx: the HID-BPF context to release
 *
 */
__bpf_kfunc void
hid_bpf_release_context(struct hid_bpf_ctx *ctx)
{
	struct hid_bpf_ctx_kern *ctx_kern;
	struct hid_device *hid;

	ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
	hid = (struct hid_device *)ctx_kern->ctx.hid; /* ignore const */

	/* free the context first; the hid pointer was saved above */
	kfree(ctx_kern);

	/* get_device() is called by bus_find_device() */
	hid_put_device(hid);
}
346 
/*
 * __hid_bpf_hw_check_params() - validate a kfunc hardware-access request.
 *
 * @ctx:     the HID-BPF context (must be non-NULL)
 * @buf:     the caller-provided buffer; buf[0] is the report ID used to
 *           locate the report
 * @buf__sz: in: caller's buffer size; out: clamped to the report length
 * @rtype:   must be one of INPUT/OUTPUT/FEATURE
 *
 * Returns 0 when the request is well-formed, -EINVAL otherwise.
 */
static int
__hid_bpf_hw_check_params(struct hid_bpf_ctx *ctx, __u8 *buf, size_t *buf__sz,
			  enum hid_report_type rtype)
{
	struct hid_report_enum *report_enum;
	struct hid_report *report;
	struct hid_device *hdev;
	u32 report_len;

	/* check arguments */
	if (!ctx || !hid_ops || !buf)
		return -EINVAL;

	switch (rtype) {
	case HID_INPUT_REPORT:
	case HID_OUTPUT_REPORT:
	case HID_FEATURE_REPORT:
		break;
	default:
		return -EINVAL;
	}

	/* we need at least the report ID byte */
	if (*buf__sz < 1)
		return -EINVAL;

	hdev = (struct hid_device *)ctx->hid; /* discard const */

	report_enum = hdev->report_enum + rtype;
	report = hid_ops->hid_get_report(report_enum, buf);
	if (!report)
		return -EINVAL;

	report_len = hid_report_len(report);

	/* never transfer more than the report can hold */
	if (*buf__sz > report_len)
		*buf__sz = report_len;

	return 0;
}
386 
/**
 * hid_bpf_hw_request - Communicate with a HID device
 *
 * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context()
 * @buf: a %PTR_TO_MEM buffer
 * @buf__sz: the size of the data to transfer
 * @rtype: the type of the report (%HID_INPUT_REPORT, %HID_FEATURE_REPORT, %HID_OUTPUT_REPORT)
 * @reqtype: the type of the request (%HID_REQ_GET_REPORT, %HID_REQ_SET_REPORT, ...)
 *
 * @returns %0 on success, a negative error code otherwise.
 */
__bpf_kfunc int
hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
		   enum hid_report_type rtype, enum hid_class_request reqtype)
{
	struct hid_bpf_ctx_kern *ctx_kern;
	struct hid_device *hdev;
	size_t size = buf__sz;
	u8 *dma_data;
	int ret;

	ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);

	/* refuse re-entry: this call originates from a hw_request hook */
	if (ctx_kern->from_bpf)
		return -EDEADLOCK;

	/* check arguments */
	ret = __hid_bpf_hw_check_params(ctx, buf, &size, rtype);
	if (ret)
		return ret;

	switch (reqtype) {
	case HID_REQ_GET_REPORT:
	case HID_REQ_GET_IDLE:
	case HID_REQ_GET_PROTOCOL:
	case HID_REQ_SET_REPORT:
	case HID_REQ_SET_IDLE:
	case HID_REQ_SET_PROTOCOL:
		break;
	default:
		return -EINVAL;
	}

	hdev = (struct hid_device *)ctx->hid; /* discard const */

	/* bounce through a kmalloc'ed buffer; buf is BPF memory */
	dma_data = kmemdup(buf, size, GFP_KERNEL);
	if (!dma_data)
		return -ENOMEM;

	ret = hid_ops->hid_hw_raw_request(hdev,
					      dma_data[0], /* report ID is byte 0 */
					      dma_data,
					      size,
					      rtype,
					      reqtype,
					      (__u64)ctx,
					      true); /* prevent infinite recursions */

	/* positive return: number of bytes received, copy them back */
	if (ret > 0)
		memcpy(buf, dma_data, ret);

	kfree(dma_data);
	return ret;
}
451 
/**
 * hid_bpf_hw_output_report - Send an output report to a HID device
 *
 * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context()
 * @buf: a %PTR_TO_MEM buffer
 * @buf__sz: the size of the data to transfer
 *
 * Returns the number of bytes transferred on success, a negative error code otherwise.
 */
__bpf_kfunc int
hid_bpf_hw_output_report(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz)
{
	struct hid_bpf_ctx_kern *ctx_kern;
	struct hid_device *hdev;
	size_t size = buf__sz;
	u8 *dma_data;
	int ret;

	ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
	/* refuse re-entry: this call originates from an output_report hook */
	if (ctx_kern->from_bpf)
		return -EDEADLOCK;

	/* check arguments */
	ret = __hid_bpf_hw_check_params(ctx, buf, &size, HID_OUTPUT_REPORT);
	if (ret)
		return ret;

	hdev = (struct hid_device *)ctx->hid; /* discard const */

	/* bounce through a kmalloc'ed buffer; buf is BPF memory */
	dma_data = kmemdup(buf, size, GFP_KERNEL);
	if (!dma_data)
		return -ENOMEM;

	/* last argument prevents infinite recursion through the dispatcher */
	ret = hid_ops->hid_hw_output_report(hdev, dma_data, size, (__u64)ctx, true);

	kfree(dma_data);
	return ret;
}
490 
/**
 * hid_bpf_input_report - Inject a HID report in the kernel from a HID device
 *
 * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context()
 * @type: the type of the report (%HID_INPUT_REPORT, %HID_FEATURE_REPORT, %HID_OUTPUT_REPORT)
 * @buf: a %PTR_TO_MEM buffer
 * @buf__sz: the size of the data to transfer
 *
 * Returns %0 on success, a negative error code otherwise.
 */
__bpf_kfunc int
hid_bpf_input_report(struct hid_bpf_ctx *ctx, enum hid_report_type type, u8 *buf,
		     const size_t buf__sz)
{
	struct hid_device *hdev;
	size_t size = buf__sz;
	int ret;

	/* check arguments */
	ret = __hid_bpf_hw_check_params(ctx, buf, &size, type);
	if (ret)
		return ret;

	hdev = (struct hid_device *)ctx->hid; /* discard const */

	/* interrupt=0; ctx is forwarded as the source identifier */
	return hid_ops->hid_input_report(hdev, type, buf, size, 0, (__u64)ctx);
}
518 __bpf_kfunc_end_defs();
519 
/*
 * The following set contains all functions we agree BPF programs
 * can use.
 */
BTF_KFUNCS_START(hid_bpf_kfunc_ids)
BTF_ID_FLAGS(func, hid_bpf_get_data, KF_RET_NULL)
/* allocate/release form a KF_ACQUIRE/KF_RELEASE pair enforced by the verifier */
BTF_ID_FLAGS(func, hid_bpf_allocate_context, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
BTF_ID_FLAGS(func, hid_bpf_release_context, KF_RELEASE | KF_SLEEPABLE)
BTF_ID_FLAGS(func, hid_bpf_hw_request, KF_SLEEPABLE)
BTF_ID_FLAGS(func, hid_bpf_hw_output_report, KF_SLEEPABLE)
BTF_ID_FLAGS(func, hid_bpf_input_report, KF_SLEEPABLE)
BTF_KFUNCS_END(hid_bpf_kfunc_ids)

/* registered below for BPF_PROG_TYPE_STRUCT_OPS programs */
static const struct btf_kfunc_id_set hid_bpf_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &hid_bpf_kfunc_ids,
};
537 
/* for syscall HID-BPF */
BTF_KFUNCS_START(hid_bpf_syscall_kfunc_ids)
/* same kfuncs as above, minus KF_SLEEPABLE: syscall progs always may sleep */
BTF_ID_FLAGS(func, hid_bpf_allocate_context, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, hid_bpf_release_context, KF_RELEASE)
BTF_ID_FLAGS(func, hid_bpf_hw_request)
BTF_ID_FLAGS(func, hid_bpf_hw_output_report)
BTF_ID_FLAGS(func, hid_bpf_input_report)
BTF_KFUNCS_END(hid_bpf_syscall_kfunc_ids)

/* registered below for BPF_PROG_TYPE_SYSCALL programs */
static const struct btf_kfunc_id_set hid_bpf_syscall_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &hid_bpf_syscall_kfunc_ids,
};
551 
/*
 * hid_bpf_connect_device() - called when a HID device connects; allocate
 * the BPF event buffer if any attached program handles device events.
 *
 * Returns 0 on success (including the nothing-to-do case), or the error
 * from hid_bpf_allocate_event_data().
 */
int hid_bpf_connect_device(struct hid_device *hdev)
{
	bool need_to_allocate = false;
	struct hid_bpf_ops *e;

	/* peek at the prog list under RCU: one event handler is enough */
	rcu_read_lock();
	list_for_each_entry_rcu(e, &hdev->bpf.prog_list, list) {
		if (e->hid_device_event) {
			need_to_allocate = true;
			break;
		}
	}
	rcu_read_unlock();

	/* only allocate BPF data if there are programs attached */
	if (!need_to_allocate)
		return 0;

	return hid_bpf_allocate_event_data(hdev);
}
EXPORT_SYMBOL_GPL(hid_bpf_connect_device);
573 
574 void hid_bpf_disconnect_device(struct hid_device *hdev)
575 {
576 	kfree(hdev->bpf.device_data);
577 	hdev->bpf.device_data = NULL;
578 	hdev->bpf.allocated_data = 0;
579 }
580 EXPORT_SYMBOL_GPL(hid_bpf_disconnect_device);
581 
/*
 * hid_bpf_destroy_device() - tear down the HID-BPF state of a device.
 *
 * Ordering matters: mark destroyed, detach the struct_ops, then wait for
 * all SRCU readers (the dispatch paths above) before destroying the SRCU
 * state itself.
 */
void hid_bpf_destroy_device(struct hid_device *hdev)
{
	if (!hdev)
		return;

	/* mark the device as destroyed in bpf so we don't reattach it */
	hdev->bpf.destroyed = true;

	__hid_bpf_ops_destroy_device(hdev);

	/* drain in-flight dispatchers before freeing the SRCU structure */
	synchronize_srcu(&hdev->bpf.srcu);
	cleanup_srcu_struct(&hdev->bpf.srcu);
}
EXPORT_SYMBOL_GPL(hid_bpf_destroy_device);
596 
/*
 * hid_bpf_device_init() - initialize the per-device HID-BPF state
 * (program list, its lock, and the SRCU domain protecting dispatch).
 *
 * Returns the result of init_srcu_struct() (0 on success).
 */
int hid_bpf_device_init(struct hid_device *hdev)
{
	INIT_LIST_HEAD(&hdev->bpf.prog_list);
	mutex_init(&hdev->bpf.prog_list_lock);
	return init_srcu_struct(&hdev->bpf.srcu);
}
EXPORT_SYMBOL_GPL(hid_bpf_device_init);
604 
605 static int __init hid_bpf_init(void)
606 {
607 	int err;
608 
609 	/* Note: if we exit with an error any time here, we would entirely break HID, which
610 	 * is probably not something we want. So we log an error and return success.
611 	 *
612 	 * This is not a big deal: nobody will be able to use the functionality.
613 	 */
614 
615 	err = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &hid_bpf_kfunc_set);
616 	if (err) {
617 		pr_warn("error while setting HID BPF tracing kfuncs: %d", err);
618 		return 0;
619 	}
620 
621 	err = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &hid_bpf_syscall_kfunc_set);
622 	if (err) {
623 		pr_warn("error while setting HID BPF syscall kfuncs: %d", err);
624 		return 0;
625 	}
626 
627 	return 0;
628 }
629 
630 late_initcall(hid_bpf_init);
631 MODULE_AUTHOR("Benjamin Tissoires");
632 MODULE_LICENSE("GPL");
633